From adca473021dc8cb2397929918038f76b56a4595d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 11 May 2012 18:01:31 +0100 Subject: drm/i915: All members of gen4 have hotplug, so unconditionally enable its irq Also as we set the HOTPLUG_EN to 0 during pre-install, we can simply set it during post-install, and nor do we wish to enable unwanted hotplug events. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 75 +++++++++++++++++------------------------ 1 file changed, 31 insertions(+), 44 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index cc4a63307611..e31515aded8c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2320,10 +2320,8 @@ static void i965_irq_preinstall(struct drm_device * dev) atomic_set(&dev_priv->irq_received, 0); - if (I915_HAS_HOTPLUG(dev)) { - I915_WRITE(PORT_HOTPLUG_EN, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - } + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(HWSTAM, 0xeffe); for_each_pipe(pipe) @@ -2336,11 +2334,13 @@ static void i965_irq_preinstall(struct drm_device * dev) static int i965_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 hotplug_en; u32 enable_mask; u32 error_mask; /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | + I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | @@ -2356,13 +2356,6 @@ static int i965_irq_postinstall(struct drm_device *dev) dev_priv->pipestat[0] = 0; dev_priv->pipestat[1] = 0; - if (I915_HAS_HOTPLUG(dev)) { - /* Enable in IER... */ - enable_mask |= I915_DISPLAY_PORT_INTERRUPT; - /* and unmask in IMR */ - dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; - } - /* * Enable some error detection, note the instruction error mask * bit is reserved, so we leave it masked. 
@@ -2382,36 +2375,33 @@ static int i965_irq_postinstall(struct drm_device *dev) I915_WRITE(IER, enable_mask); POSTING_READ(IER); - if (I915_HAS_HOTPLUG(dev)) { - u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); - - /* Note HDMI and DP share bits */ - if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) - hotplug_en |= HDMIB_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) - hotplug_en |= HDMIC_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) - hotplug_en |= HDMID_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) - hotplug_en |= SDVOC_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) - hotplug_en |= SDVOB_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { - hotplug_en |= CRT_HOTPLUG_INT_EN; + /* Note HDMI and DP share hotplug bits */ + hotplug_en = 0; + if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) + hotplug_en |= HDMIB_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) + hotplug_en |= HDMIC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) + hotplug_en |= HDMID_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) + hotplug_en |= SDVOC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) + hotplug_en |= SDVOB_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { + hotplug_en |= CRT_HOTPLUG_INT_EN; - /* Programming the CRT detection parameters tends - to generate a spurious hotplug event about three - seconds later. So just do it once. - */ - if (IS_G4X(dev)) - hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; - hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; - } + /* Programming the CRT detection parameters tends + to generate a spurious hotplug event about three + seconds later. So just do it once. + */ + if (IS_G4X(dev)) + hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; + hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; + } - /* Ignore TV since it's buggy */ + /* Ignore TV since it's buggy */ - I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); - } + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); intel_opregion_enable_asle(dev); @@ -2469,8 +2459,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS) ret = IRQ_HANDLED; /* Consume port. Then clear IIR or we'll miss events */ - if ((I915_HAS_HOTPLUG(dev)) && - (iir & I915_DISPLAY_PORT_INTERRUPT)) { + if (iir & I915_DISPLAY_PORT_INTERRUPT) { u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", @@ -2543,10 +2532,8 @@ static void i965_irq_uninstall(struct drm_device * dev) if (!dev_priv) return; - if (I915_HAS_HOTPLUG(dev)) { - I915_WRITE(PORT_HOTPLUG_EN, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - } + I915_WRITE(PORT_HOTPLUG_EN, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(HWSTAM, 0xffffffff); for_each_pipe(pipe) -- cgit v1.2.3 From 10f76a38166cd51160b3cd7ba2f16339dd381589 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 11 May 2012 18:01:32 +0100 Subject: drm/i915: Inspect the right status bits for DP/HDMI hotplug on gen4 The status bits corresponding to the interrupt enable bits are the "live" hotplug status bits, and reflect the current status of the port (high for a detected connection, low for a disconnect). The actual bits corresponding to the interrupt source are elsewhere. 
The actual event is then determined by a combination of the interrupt flag and the current live status (if the interrupt is active, but the current status is not, then we have detected a disconnect.) Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 21 +++++++++++++++------ drivers/gpu/drm/i915/intel_dp.c | 12 +++++------- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2d49b9507ed0..7e0977b0ff2b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1558,12 +1558,21 @@ #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) #define PORT_HOTPLUG_STAT 0x61114 -#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) -#define DPB_HOTPLUG_INT_STATUS (1 << 29) -#define HDMIC_HOTPLUG_INT_STATUS (1 << 28) -#define DPC_HOTPLUG_INT_STATUS (1 << 28) -#define HDMID_HOTPLUG_INT_STATUS (1 << 27) -#define DPD_HOTPLUG_INT_STATUS (1 << 27) +/* HDMI/DP bits are gen4+ */ +#define DPB_HOTPLUG_LIVE_STATUS (1 << 29) +#define DPC_HOTPLUG_LIVE_STATUS (1 << 28) +#define DPD_HOTPLUG_LIVE_STATUS (1 << 27) +#define DPD_HOTPLUG_INT_STATUS (3 << 21) +#define DPC_HOTPLUG_INT_STATUS (3 << 19) +#define DPB_HOTPLUG_INT_STATUS (3 << 17) +/* HDMI bits are shared with the DP bits */ +#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29) +#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28) +#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27) +#define HDMID_HOTPLUG_INT_STATUS (3 << 21) +#define HDMIC_HOTPLUG_INT_STATUS (3 << 19) +#define HDMIB_HOTPLUG_INT_STATUS (3 << 17) +/* CRT/TV/SDVO common between gen3+ */ #define CRT_HOTPLUG_INT_STATUS (1 << 11) #define TV_HOTPLUG_INT_STATUS (1 << 10) #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index a1a5ce71558a..c686393abe6d 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -2066,25 +2066,23 @@ g4x_dp_detect(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t temp, bit; + uint32_t bit; switch (intel_dp->output_reg) { case DP_B: - bit = DPB_HOTPLUG_INT_STATUS; + bit = DPB_HOTPLUG_LIVE_STATUS; break; case DP_C: - bit = DPC_HOTPLUG_INT_STATUS; + bit = DPC_HOTPLUG_LIVE_STATUS; break; case DP_D: - bit = DPD_HOTPLUG_INT_STATUS; + bit = DPD_HOTPLUG_LIVE_STATUS; break; default: return connector_status_unknown; } - temp = I915_READ(PORT_HOTPLUG_STAT); - - if ((temp & bit) == 0) + if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) return connector_status_disconnected; return intel_dp_detect_dpcd(intel_dp); -- cgit v1.2.3 From 084b612ecf8e59973576b2f644e6949609c79375 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 11 May 2012 18:01:33 +0100 Subject: drm/i915: SDVO hotplug have different interrupt status bits for i915/i965/g4x Note that gen3 is the only platform where we've got the bit definitions right, hence the workaround of disabling sdvo hotplug support on i945g/gm is not due to misdiagnosis of broken hotplug irq handling ... Signed-off-by: Chris Wilson [danvet: add some blurb about sdvo hotplug fail on i945g/gm I've wondered about while reviewing.] 
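[Editor's illustration, not part of either patch: the live-status/interrupt-status split described in "Inspect the right status bits for DP/HDMI hotplug on gen4" above can be summarised with a small hypothetical helper. The function is made up for illustration only; the register and bit names are the ones that patch adds to i915_reg.h, and I915_READ is assumed to be usable in scope as elsewhere in i915_irq.c.]

    /* Hypothetical helper: report whether a DP port B hotplug event fired
     * and whether the port is connected right now. A plug is event + live
     * high; an unplug is event with the live bit low. */
    static bool dp_b_hotplug_is_connect(struct drm_i915_private *dev_priv,
                                        bool *event_fired)
    {
            u32 stat = I915_READ(PORT_HOTPLUG_STAT);

            /* Bits 18:17 latch the interrupt source for port B ... */
            *event_fired = (stat & DPB_HOTPLUG_INT_STATUS) != 0;

            /* ... while bit 29 mirrors the current state of the port. */
            return (stat & DPB_HOTPLUG_LIVE_STATUS) != 0;
    }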
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 19 +++++++++++++------ drivers/gpu/drm/i915/i915_reg.h | 11 ++++++++--- drivers/gpu/drm/i915/intel_sdvo.c | 17 +++++++++++++---- 3 files changed, 34 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e31515aded8c..5134a62ff82f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2158,9 +2158,9 @@ static int i915_irq_postinstall(struct drm_device *dev) hotplug_en |= HDMIC_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) hotplug_en |= HDMID_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) hotplug_en |= SDVOC_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) hotplug_en |= SDVOB_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { hotplug_en |= CRT_HOTPLUG_INT_EN; @@ -2383,10 +2383,17 @@ static int i965_irq_postinstall(struct drm_device *dev) hotplug_en |= HDMIC_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) hotplug_en |= HDMID_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) - hotplug_en |= SDVOC_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) - hotplug_en |= SDVOB_HOTPLUG_INT_EN; + if (IS_G4X(dev)) { + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) + hotplug_en |= SDVOC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X) + hotplug_en |= SDVOB_HOTPLUG_INT_EN; + } else { + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965) + hotplug_en |= SDVOC_HOTPLUG_INT_EN; + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965) + hotplug_en |= SDVOB_HOTPLUG_INT_EN; + } if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { hotplug_en |= CRT_HOTPLUG_INT_EN; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7e0977b0ff2b..0f45a18a5e41 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1572,15 +1572,20 @@ #define HDMID_HOTPLUG_INT_STATUS (3 << 21) #define HDMIC_HOTPLUG_INT_STATUS (3 << 19) #define HDMIB_HOTPLUG_INT_STATUS (3 << 17) -/* CRT/TV/SDVO common between gen3+ */ +/* CRT/TV common between gen3+ */ #define CRT_HOTPLUG_INT_STATUS (1 << 11) #define TV_HOTPLUG_INT_STATUS (1 << 10) #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) #define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) #define CRT_HOTPLUG_MONITOR_MONO (2 << 8) #define CRT_HOTPLUG_MONITOR_NONE (0 << 8) -#define SDVOC_HOTPLUG_INT_STATUS (1 << 7) -#define SDVOB_HOTPLUG_INT_STATUS (1 << 6) +/* SDVO is different across gen3/4 */ +#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3) +#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2) +#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4) +#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2) +#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7) +#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6) /* SDVO port control */ #define SDVOB 0x61140 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 125228e77c50..e0c93e022995 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2511,6 +2511,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) struct 
drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *intel_encoder; struct intel_sdvo *intel_sdvo; + u32 hotplug_mask; int i; intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); @@ -2542,10 +2543,18 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) } } - if (intel_sdvo->is_sdvob) - dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; - else - dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; + hotplug_mask = 0; + if (IS_G4X(dev)) { + hotplug_mask = intel_sdvo->is_sdvob ? + SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X; + } else if (IS_GEN4(dev)) { + hotplug_mask = intel_sdvo->is_sdvob ? + SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965; + } else { + hotplug_mask = intel_sdvo->is_sdvob ? + SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915; + } + dev_priv->hotplug_supported_mask |= hotplug_mask; drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); -- cgit v1.2.3 From 8ec22b214d76773c9d89f4040505ce10f677ed9a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 11 May 2012 18:01:34 +0100 Subject: drm/i915/hdmi: Query the live connector status bit for G4x Similar to g4x_dp_detect() we should probe the PORT_HOTPLUG_STATUS as to whether the connector is active prior to attempting to retrieve the EDID. Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 2ead3bf7c21d..77f0f8fb9e35 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -452,6 +452,30 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, return true; } +static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi) +{ + struct drm_device *dev = intel_hdmi->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t bit; + + switch (intel_hdmi->sdvox_reg) { + case HDMIB: + bit = HDMIB_HOTPLUG_LIVE_STATUS; + break; + case HDMIC: + bit = HDMIC_HOTPLUG_LIVE_STATUS; + break; + case HDMID: + bit = HDMID_HOTPLUG_LIVE_STATUS; + break; + default: + bit = 0; + break; + } + + return I915_READ(PORT_HOTPLUG_STAT) & bit; +} + static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector, bool force) { @@ -460,6 +484,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) struct edid *edid; enum drm_connector_status status = connector_status_disconnected; + if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi)) + return status; + intel_hdmi->has_hdmi_sink = false; intel_hdmi->has_audio = false; edid = drm_get_edid(connector, -- cgit v1.2.3 From 78d56d78c3ab5eeca35c6fcb10301418eda8c405 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 11 May 2012 18:01:35 +0100 Subject: drm/i915/dp: For consistency use the DP hotplug synonyms Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index c686393abe6d..9b2effcc90e5 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -2462,19 +2462,19 @@ intel_dp_init(struct drm_device *dev, int output_reg) case DP_B: case PCH_DP_B: dev_priv->hotplug_supported_mask |= - HDMIB_HOTPLUG_INT_STATUS; + DPB_HOTPLUG_INT_STATUS; name = "DPDDC-B"; break; case DP_C: case PCH_DP_C: 
dev_priv->hotplug_supported_mask |= - HDMIC_HOTPLUG_INT_STATUS; + DPC_HOTPLUG_INT_STATUS; name = "DPDDC-C"; break; case DP_D: case PCH_DP_D: dev_priv->hotplug_supported_mask |= - HDMID_HOTPLUG_INT_STATUS; + DPD_HOTPLUG_INT_STATUS; name = "DPDDC-D"; break; } -- cgit v1.2.3 From c9a2969830ae6f8c548f4960751dbb243f6f1f5e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 10 Apr 2012 13:55:47 +0200 Subject: drm/i915: clarify preferred sdvo input mode code - kill intel_sdvo->input_dtd, it's only used as a temporary variable, we store the preferred input mode in the adjusted mode at mode_fixup time. - rename the function to make it clear what we want it to do (get the preferred mode) and say in a comment what it unfortunately does as a side-effect (set the new output timings). Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sdvo.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e0c93e022995..7d4d01786daa 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -140,9 +140,6 @@ struct intel_sdvo { /* DDC bus used by this SDVO encoder */ uint8_t ddc_bus; - - /* Input timings for adjusted_mode */ - struct intel_sdvo_dtd input_dtd; }; struct intel_sdvo_connector { @@ -949,11 +946,15 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, return true; } +/* Asks the sdvo controller for the preferred input mode given the output mode. + * Unfortunately we have to set up the full output mode to do that. */ static bool -intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { + struct intel_sdvo_dtd input_dtd; + /* Reset the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) return false; @@ -965,10 +966,10 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, return false; if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, - &intel_sdvo->input_dtd)) + &input_dtd)) return false; - intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd); + intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); return true; } @@ -989,17 +990,17 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) return false; - (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, - mode, - adjusted_mode); + (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, + mode, + adjusted_mode); } else if (intel_sdvo->is_lvds) { if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, intel_sdvo->sdvo_lvds_fixed_mode)) return false; - (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, - mode, - adjusted_mode); + (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, + mode, + adjusted_mode); } /* Make the CRTC code factor in the SDVO pixel multiplier. The -- cgit v1.2.3 From c8d4bb54c11c66610aaf8829acff6aa0506c7c29 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 10 Apr 2012 13:55:48 +0200 Subject: drm/i915: don't silently ignore sdvo mode_set failures Unfortunately we can't abort a mode_set, but at least tell the user that something might have gone wrong when setting the sdvo input or output timing fails. 
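[Editor's illustration, not part of either patch: condensed from the previous patch's diff, the TV path of intel_sdvo_mode_fixup() now reads as below (the LVDS path is the same except that intel_sdvo->sdvo_lvds_fixed_mode is used as the output mode). The renamed helper asks the encoder for its preferred input mode, setting up the output timing as an unavoidable side effect, as its new comment notes; the definitive output and input timings are still programmed later in intel_sdvo_mode_set(), which is where the patch below adds the failure messages.]

    /* Program the desired output timing first -- the encoder needs it to
     * compute a preferred input timing -- then ask for that preferred
     * input mode and hand it back to the CRTC code via adjusted_mode. */
    if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
            return false;
    (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, mode,
                                               adjusted_mode);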
Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sdvo.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 7d4d01786daa..a1840f44941b 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1054,7 +1054,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, intel_sdvo->sdvo_lvds_fixed_mode); else intel_sdvo_get_dtd_from_mode(&output_dtd, mode); - (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd); + if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) + DRM_INFO("Setting output timings on %s failed\n", + SDVO_NAME(intel_sdvo)); /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) @@ -1076,7 +1078,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, * adjusted_mode. */ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); - (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); + if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) + DRM_INFO("Setting input timings on %s failed\n", + SDVO_NAME(intel_sdvo)); switch (pixel_multiplier) { default: -- cgit v1.2.3 From 5c81fe85dad3c8c2bcec03e3fc2bfd4ea198763c Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 24 May 2012 15:03:08 -0700 Subject: drm/i915: timeout parameter for seqno wait Insert a wait parameter in the code so we can possibly timeout on a seqno wait if need be. The code should be functionally the same as before because all the callers will continue to retry if an arbitrary timeout elapses. We'd like to have nanosecond granularity, but the only way to do this is with hrtimer, and that doesn't fit well with the needs of this code. v2: Fix rebase error (Chris) Return proper time even in wedged + signal case (Chris + Ben) Use timespec constructs (Ben) Didn't take Daniel's advice regarding the Frankenstein-ness of the function. I did try his advice, but in the end I liked the way the original code looked, better. v3: Make wakeups far less frequent for infinite waits (Chris) v4: Remove dummy_wait variable (Daniel) Use raw monotonic time instead of jiffies (made the code a bit cleaner) (Ben) Added a couple of warnings (Ben) v5: Remove warnings (Daniel) Use more accurate time diff for default case (Daniel) Bikeshed for setting the return timespec in timeout case (Daniel) s/jiffies/time in one of the comments Signed-off-by: Ben Widawsky Reviewed-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 70 ++++++++++++++++++++++++++++++++++------- 1 file changed, 58 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6d2180cf3da5..c8a5b04bc8d5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1869,34 +1869,82 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) return ret; } +/** + * __wait_seqno - wait until execution of seqno has finished + * @ring: the ring expected to report seqno + * @seqno: duh! + * @interruptible: do an interruptible wait (normally yes) + * @timeout: in - how long to wait (NULL forever); out - how much time remaining + * + * Returns 0 if the seqno was found within the alloted time. Else returns the + * errno with remaining time filled in timeout argument. 
+ */ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, - bool interruptible) + bool interruptible, struct timespec *timeout) { drm_i915_private_t *dev_priv = ring->dev->dev_private; - int ret = 0; + struct timespec before, now, wait_time={1,0}; + unsigned long timeout_jiffies; + long end; + bool wait_forever = true; if (i915_seqno_passed(ring->get_seqno(ring), seqno)) return 0; trace_i915_gem_request_wait_begin(ring, seqno); + + if (timeout != NULL) { + wait_time = *timeout; + wait_forever = false; + } + + timeout_jiffies = timespec_to_jiffies(&wait_time); + if (WARN_ON(!ring->irq_get(ring))) return -ENODEV; + /* Record current time in case interrupted by signal, or wedged * */ + getrawmonotonic(&before); + #define EXIT_COND \ (i915_seqno_passed(ring->get_seqno(ring), seqno) || \ atomic_read(&dev_priv->mm.wedged)) + do { + if (interruptible) + end = wait_event_interruptible_timeout(ring->irq_queue, + EXIT_COND, + timeout_jiffies); + else + end = wait_event_timeout(ring->irq_queue, EXIT_COND, + timeout_jiffies); - if (interruptible) - ret = wait_event_interruptible(ring->irq_queue, - EXIT_COND); - else - wait_event(ring->irq_queue, EXIT_COND); + if (atomic_read(&dev_priv->mm.wedged)) + end = -EAGAIN; + } while (end == 0 && wait_forever); + + getrawmonotonic(&now); ring->irq_put(ring); trace_i915_gem_request_wait_end(ring, seqno); #undef EXIT_COND - return ret; + if (timeout) { + struct timespec sleep_time = timespec_sub(now, before); + *timeout = timespec_sub(*timeout, sleep_time); + } + + switch (end) { + case -EAGAIN: /* Wedged */ + case -ERESTARTSYS: /* Signal */ + return (int)end; + case 0: /* Timeout */ + if (timeout) + set_normalized_timespec(timeout, 0, 0); + return -ETIME; + default: /* Completed */ + WARN_ON(end < 0); /* We're not aware of other errors */ + return 0; + } } /** @@ -1920,9 +1968,7 @@ i915_wait_request(struct intel_ring_buffer *ring, if (ret) return ret; - ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible); - if (atomic_read(&dev_priv->mm.wedged)) - ret = -EAGAIN; + ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL); return ret; } @@ -3002,7 +3048,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) if (seqno == 0) return 0; - ret = __wait_seqno(ring, seqno, true); + ret = __wait_seqno(ring, seqno, true, NULL); if (ret == 0) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); -- cgit v1.2.3 From f3fd37683cc406ffaccf097de0433130c85c8651 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 24 May 2012 15:03:09 -0700 Subject: drm/i915: improve i915_wait_request_begin trace The trace events adds whether or not the wait was blocking. Blocking in this case means to hold struct_mutex (ie. no new work can be submitted during the wait). The information is inherently racy. The blocking information is racy since mutex_is_locked doesn't check that the current thread holds the lock. The only other option would be to pass the boolean information of whether or not the class was blocking down through the stack which is less desirable. v2: Don't do a trace event per loop. 
(Chris) Only get blocking/non-blocking info (Chris) v3: updated comment in code as well as commit msg (Daniel) Add "(NB)" to trace information to remind us in 6 months (Ben) Signed-off-by: Ben Widawsky Reviewed-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_trace.h | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index dac7bba4d9da..fe90b3a84a6d 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -311,9 +311,33 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, TP_ARGS(ring, seqno) ); -DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, +TRACE_EVENT(i915_gem_request_wait_begin, TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), - TP_ARGS(ring, seqno) + TP_ARGS(ring, seqno), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, ring) + __field(u32, seqno) + __field(bool, blocking) + ), + + /* NB: the blocking information is racy since mutex_is_locked + * doesn't check that the current thread holds the lock. The only + * other option would be to pass the boolean information of whether + * or not the class was blocking down through the stack which is + * less desirable. + */ + TP_fast_assign( + __entry->dev = ring->dev->primary->index; + __entry->ring = ring->id; + __entry->seqno = seqno; + __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex); + ), + + TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", + __entry->dev, __entry->ring, __entry->seqno, + __entry->blocking ? "yes (NB)" : "no") ); DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, -- cgit v1.2.3 From 23ba4fd0a42a46551041e379712e808ad496ba45 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 24 May 2012 15:03:10 -0700 Subject: drm/i915: wait render timeout ioctl This helps implement GL_ARB_sync but stops short of allowing full blown sync objects. Finally we can use the new timed seqno waiting function to allow userspace to wait on a buffer object with a timeout. This implements that interface. The IOCTL will take as input a buffer object handle, and a timeout in nanoseconds (flags is currently optional but will likely be used for permutations of flush operations). Users may specify 0 nanoseconds to instantly check. The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any non-zero timeout parameter the wait ioctl will wait for the given number of nanoseconds on an object becoming unbusy. Since the wait itself does so holding struct_mutex the object may become re-busied before this completes. A similar but shorter race condition exists in the busy ioctl. v2: ETIME/ERESTARTSYS instead of changing to EBUSY, and EGAIN (Chris) Flush the object from the gpu write domain (Chris + Daniel) Fix leaked refcount in good case (Chris) Naturally align ioctl struct (Chris) v3: Drop lock after getting seqno to avoid ugly dance (Chris) v4: check for 0 timeout after olr check to allow polling (Chris) v5: Updated the comment. (Chris) v6: Return -ETIME instead of -EBUSY when timeout_ns is 0 (Daniel) Fix the commit message comment to be less ugly (Ben) Add a warning to check the return timespec (Ben) v7: Use DRM_AUTH for the ioctl. 
(Eugeni) Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/i915_gem.c | 86 +++++++++++++++++++++++++++++++++++++++++ include/drm/i915_drm.h | 10 +++++ 4 files changed, 99 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f94792626b94..262a74d1f852 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1803,6 +1803,7 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 11c7a6a330c1..8ac10e8c0cdb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1229,6 +1229,8 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int i915_gem_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); int i915_gem_init_object(struct drm_gem_object *obj); int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c8a5b04bc8d5..a0d740fac240 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2000,6 +2000,92 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) return 0; } +/** + * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT + * @DRM_IOCTL_ARGS: standard ioctl arguments + * + * Returns 0 if successful, else an error is returned with the remaining time in + * the timeout parameter. + * -ETIME: object is still busy after timeout + * -ERESTARTSYS: signal interrupted the wait + * -ENONENT: object doesn't exist + * Also possible, but rare: + * -EAGAIN: GPU wedged + * -ENOMEM: damn + * -ENODEV: Internal IRQ fail + * -E?: The add request failed + * + * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any + * non-zero timeout parameter the wait ioctl will wait for the given number of + * nanoseconds on an object becoming unbusy. Since the wait itself does so + * without holding struct_mutex the object may become re-busied before this + * function completes. A similar but shorter * race condition exists in the busy + * ioctl + */ +int +i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct drm_i915_gem_wait *args = data; + struct drm_i915_gem_object *obj; + struct intel_ring_buffer *ring = NULL; + struct timespec timeout; + u32 seqno = 0; + int ret = 0; + + timeout = ns_to_timespec(args->timeout_ns); + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle)); + if (&obj->base == NULL) { + mutex_unlock(&dev->struct_mutex); + return -ENOENT; + } + + /* Need to make sure the object is flushed first. 
This non-obvious + * flush is required to enforce that (active && !olr) == no wait + * necessary. + */ + ret = i915_gem_object_flush_gpu_write_domain(obj); + if (ret) + goto out; + + if (obj->active) { + seqno = obj->last_rendering_seqno; + ring = obj->ring; + } + + if (seqno == 0) + goto out; + + ret = i915_gem_check_olr(ring, seqno); + if (ret) + goto out; + + /* Do this after OLR check to make sure we make forward progress polling + * on this IOCTL with a 0 timeout (like busy ioctl) + */ + if (!args->timeout_ns) { + ret = -ETIME; + goto out; + } + + drm_gem_object_unreference(&obj->base); + mutex_unlock(&dev->struct_mutex); + + ret = __wait_seqno(ring, seqno, true, &timeout); + WARN_ON(!timespec_valid(&timeout)); + args->timeout_ns = timespec_to_ns(&timeout); + return ret; + +out: + drm_gem_object_unreference(&obj->base); + mutex_unlock(&dev->struct_mutex); + return ret; +} + /** * i915_gem_object_sync - sync an object to a ring. * diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index f3f82242bf1d..bab174334da5 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -200,6 +200,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_GEM_EXECBUFFER2 0x29 #define DRM_I915_GET_SPRITE_COLORKEY 0x2a #define DRM_I915_SET_SPRITE_COLORKEY 0x2b +#define DRM_I915_GEM_WAIT 0x2c #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -243,6 +244,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) +#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -886,4 +888,12 @@ struct drm_intel_sprite_colorkey { __u32 flags; }; +struct drm_i915_gem_wait { + /** Handle of BO we shall wait on */ + __u32 bo_handle; + __u32 flags; + /** Number of nanoseconds to wait, Returns time remaining. */ + __u64 timeout_ns; +}; + #endif /* _I915_DRM_H_ */ -- cgit v1.2.3 From 199b2bc25ba587f666a712e9d8475d691d9cec4c Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 24 May 2012 15:03:11 -0700 Subject: drm/i915: s/i915_wait_request/i915_wait_seqno/g Wait request is poorly named IMO. After working with these functions for some time, I feel it's much clearer to name the functions more appropriately. Of course we must update the callers to use the new name as well. This leaves room within our namespace for a *real* wait request function at some point. Note to maintainer: this patch is optional. 
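[Editor's note, not part of the series: for reference, the wait-render-timeout ioctl added in the previous patch can be driven from userspace roughly as below. This is only a sketch: it assumes a GEM buffer handle is already in hand and that the i915_drm.h header carrying the new struct and ioctl number is installed.]

    #include <string.h>
    #include <errno.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Wait up to 10 ms for a GEM object to become idle. Returns 0 when the
     * object is idle (wait.timeout_ns then holds the time remaining) or a
     * negative errno, -ETIME meaning the object was still busy. */
    static int wait_bo_idle_10ms(int drm_fd, unsigned int bo_handle)
    {
            struct drm_i915_gem_wait wait;

            memset(&wait, 0, sizeof(wait));
            wait.bo_handle = bo_handle;
            wait.timeout_ns = 10 * 1000 * 1000;
            if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
                    return 0;
            return -errno;
    }

A timeout_ns of 0 simply polls, mirroring the busy ioctl as the commit message explains.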
Signed-off-by: Ben Widawsky Reviewed-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- drivers/gpu/drm/i915/i915_gem.c | 9 ++++----- drivers/gpu/drm/i915/intel_overlay.c | 4 ++-- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 +- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8ac10e8c0cdb..a17e31e91951 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1315,8 +1315,8 @@ int __must_check i915_gem_idle(struct drm_device *dev); int __must_check i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file, struct drm_i915_gem_request *request); -int __must_check i915_wait_request(struct intel_ring_buffer *ring, - uint32_t seqno); +int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, + uint32_t seqno); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int __must_check i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a0d740fac240..d2eaa008573b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1952,8 +1952,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, * request and object lists appropriately for that event. */ int -i915_wait_request(struct intel_ring_buffer *ring, - uint32_t seqno) +i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) { drm_i915_private_t *dev_priv = ring->dev->dev_private; int ret = 0; @@ -1991,7 +1990,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) * it. */ if (obj->active) { - ret = i915_wait_request(obj->ring, obj->last_rendering_seqno); + ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno); if (ret) return ret; i915_gem_retire_requests_ring(obj->ring); @@ -2264,7 +2263,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring) return ret; } - return i915_wait_request(ring, i915_gem_next_request_seqno(ring)); + return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring)); } int i915_gpu_idle(struct drm_device *dev) @@ -2468,7 +2467,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) } if (obj->last_fenced_seqno) { - ret = i915_wait_request(obj->ring, obj->last_fenced_seqno); + ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 458743da3774..830d0dd610e1 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -226,7 +226,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, } overlay->last_flip_req = request->seqno; overlay->flip_tail = tail; - ret = i915_wait_request(ring, overlay->last_flip_req); + ret = i915_wait_seqno(ring, overlay->last_flip_req); if (ret) return ret; i915_gem_retire_requests(dev); @@ -452,7 +452,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) if (overlay->last_flip_req == 0) return 0; - ret = i915_wait_request(ring, overlay->last_flip_req); + ret = i915_wait_seqno(ring, overlay->last_flip_req); if (ret) return ret; i915_gem_retire_requests(dev); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b59b6d5b7583..1df1694835a1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1085,7 +1085,7 @@ static int 
intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) was_interruptible = dev_priv->mm.interruptible; dev_priv->mm.interruptible = false; - ret = i915_wait_request(ring, seqno); + ret = i915_wait_seqno(ring, seqno); dev_priv->mm.interruptible = was_interruptible; if (!ret) -- cgit v1.2.3 From eeafaaca763408c099d2ade3a69e0716f296a97b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 25 May 2012 10:23:37 +0100 Subject: drm/i915/hdmi: Fix reg values for g4x_hdmi_connected Paulo pointed out that gen4 re-used the SDVO registers for HDMI (the separate HDMI registers where introduced with the first PCH) and so g4x_hdmi_connected() never selected the right bit and always returned disconnected. Regression in commit 8ec22b214d76773c9d89f4040505ce10f677ed9a Author: Chris Wilson Date: Fri May 11 18:01:34 2012 +0100 drm/i915/hdmi: Query the live connector status bit for G4x Cc: Paulo Zanoni Reviewed-by: Paulo Zanoni Tested-by: Paulo Zanoni Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 77f0f8fb9e35..4c6f1411b28a 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -459,15 +459,12 @@ static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi) uint32_t bit; switch (intel_hdmi->sdvox_reg) { - case HDMIB: + case SDVOB: bit = HDMIB_HOTPLUG_LIVE_STATUS; break; - case HDMIC: + case SDVOC: bit = HDMIC_HOTPLUG_LIVE_STATUS; break; - case HDMID: - bit = HDMID_HOTPLUG_LIVE_STATUS; - break; default: bit = 0; break; -- cgit v1.2.3 From 687f4d06dbb1cf3c8f9f6050cd7aad24dc0ce978 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 28 May 2012 16:42:48 -0300 Subject: drm/i915: add set_infoframes to struct intel_hdmi We need a function that is able to fully 'set' the state of the DIP registers to a known state. Currently, we have the write_infoframe function that is called twice: once for AVI and once for SPD. The problem is that write_infoframe tries to keep the state of the DIP register as it is, changing only the minimum necessary bits. The second problem is that write_infoframe does twice (once for each time it is called) some work that should be done only once (like waiting for vblank and setting the port). If we add even more DIPs, it will do even more repeated work. This patch only adds the infrastructure keeping the code behavior the same as before. 
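[Editor's illustration, not part of the patch: condensed from the diff below, the new infrastructure amounts to picking one pair of hooks per platform at init time and always dispatching through the set_infoframes pointer at mode-set time, instead of calling the AVI and SPD helpers directly from several places.]

    /* At init, one branch per platform family (the others are elided): */
    if (!HAS_PCH_SPLIT(dev)) {
            intel_hdmi->write_infoframe = g4x_write_infoframe;
            intel_hdmi->set_infoframes = g4x_set_infoframes;
    }

    /* At mode-set time, intel_hdmi_mode_set() and intel_ddi_mode_set()
     * now both just call: */
    intel_hdmi->set_infoframes(encoder, adjusted_mode);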
v2: add static keywords Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ddi.c | 3 +-- drivers/gpu/drm/i915/intel_drv.h | 5 ++--- drivers/gpu/drm/i915/intel_hdmi.c | 47 +++++++++++++++++++++++++++++++++++---- 3 files changed, 46 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 46d1e886c692..f33fe1a1c33e 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -726,8 +726,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder, I915_WRITE(DDI_FUNC_CTL(pipe), temp); - intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); - intel_hdmi_set_spd_infoframe(encoder); + intel_hdmi->set_infoframes(encoder, adjusted_mode); } void intel_ddi_dpms(struct drm_encoder *encoder, int mode) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3e0918834e7e..39d7b07c1e8b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -301,6 +301,8 @@ struct intel_hdmi { enum hdmi_force_audio force_audio; void (*write_infoframe)(struct drm_encoder *encoder, struct dip_infoframe *frame); + void (*set_infoframes)(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode); }; static inline struct drm_crtc * @@ -343,9 +345,6 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector) extern void intel_crt_init(struct drm_device *dev); extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); -extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, - struct drm_display_mode *adjusted_mode); -extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder); extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 4c6f1411b28a..8d892b0cd8a1 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -315,7 +315,7 @@ static void intel_set_infoframe(struct drm_encoder *encoder, intel_hdmi->write_infoframe(encoder, frame); } -void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, +static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct dip_infoframe avi_if = { @@ -330,7 +330,7 @@ void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, intel_set_infoframe(encoder, &avi_if); } -void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) +static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) { struct dip_infoframe spd_if; @@ -345,6 +345,41 @@ void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) intel_set_infoframe(encoder, &spd_if); } +static void g4x_set_infoframes(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_spd_infoframe(encoder); +} + +static void ibx_set_infoframes(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_spd_infoframe(encoder); +} + +static void cpt_set_infoframes(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_spd_infoframe(encoder); +} + +static void 
vlv_set_infoframes(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_spd_infoframe(encoder); +} + +static void hsw_set_infoframes(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_spd_infoframe(encoder); +} + static void intel_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -388,8 +423,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, I915_WRITE(intel_hdmi->sdvox_reg, sdvox); POSTING_READ(intel_hdmi->sdvox_reg); - intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); - intel_hdmi_set_spd_infoframe(encoder); + intel_hdmi->set_infoframes(encoder, adjusted_mode); } static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) @@ -734,9 +768,11 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) if (!HAS_PCH_SPLIT(dev)) { intel_hdmi->write_infoframe = g4x_write_infoframe; + intel_hdmi->set_infoframes = g4x_set_infoframes; I915_WRITE(VIDEO_DIP_CTL, 0); } else if (IS_VALLEYVIEW(dev)) { intel_hdmi->write_infoframe = vlv_write_infoframe; + intel_hdmi->set_infoframes = vlv_set_infoframes; for_each_pipe(i) I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0); } else if (IS_HASWELL(dev)) { @@ -744,14 +780,17 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) * just doing the minimal required for HDMI to work at this stage. */ intel_hdmi->write_infoframe = hsw_write_infoframe; + intel_hdmi->set_infoframes = hsw_set_infoframes; for_each_pipe(i) I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0); } else if (HAS_PCH_IBX(dev)) { intel_hdmi->write_infoframe = ibx_write_infoframe; + intel_hdmi->set_infoframes = ibx_set_infoframes; for_each_pipe(i) I915_WRITE(TVIDEO_DIP_CTL(i), 0); } else { intel_hdmi->write_infoframe = cpt_write_infoframe; + intel_hdmi->set_infoframes = cpt_set_infoframes; for_each_pipe(i) I915_WRITE(TVIDEO_DIP_CTL(i), 0); } -- cgit v1.2.3 From 0c14c7f957e70c3cb100e3fd6553b0ebea557571 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 28 May 2012 16:42:49 -0300 Subject: drm/i915: properly alternate between DVI and HDMI This solves problems that happen when you alternate between HDMI and DVI on the same port. I can reproduce these problems using DP->HDMI and DP->DVI adapters on a DP port. When you first plug HDMI and then plug DVI, you need to stop sending DIPs, even if the port is in DVI mode (see the HDMI register spec). If you don't stop sending DIPs, you'll see a pink vertical line on the left side of the screen, some modes will give you a black screen, some modes won't work correctly. When you first plug DVI and then plug HDMI, you need to properly enable the DIPs, otherwise the HW won't send them. After spending a lot of time investigating this, I concluded that if the DIPs are disabled, we should not write to the DIP register again because when we do this, we also set the AVI InfoFrame frequency to "once", and this seems to really confuse our hardware. Since this problem was not exactly easy to debug, I'm adopting the defensive behavior and not just avoing the "disable twice" sequence, but also explicitly selecting the AVI InfoFrame and setting its frequency to a correct one. Also, move the "is_dvi" check from intel_set_infoframe to the set_infoframes functions since now they're going to be the first ones to deal with the DIP registers. 
This patch adds the code to fix the problem, but it depends on the removal of some code that can't be removed right now and will come later in the patch series. The patch that we need is: - drm/i915: don't write 0 to DIP control at HDMI init [danvet: Paulo clarified that this additional patch is only required to make the fix complete, this patch here alone doesn't introduce a regression but only partially solves the problem of randomly clearing the dip registers.] V2: Be even more defensive by selecting AVI and setting its frequency outside the "is_dvi" check. Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 88 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 85 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 8d892b0cd8a1..2f2adc4e511c 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -308,9 +308,6 @@ static void intel_set_infoframe(struct drm_encoder *encoder, { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - if (!intel_hdmi->has_hdmi_sink) - return; - intel_dip_infoframe_csum(frame); intel_hdmi->write_infoframe(encoder, frame); } @@ -348,6 +345,30 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) static void g4x_set_infoframes(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + u32 reg = VIDEO_DIP_CTL; + u32 val = I915_READ(reg); + + /* If the registers were not initialized yet, they might be zeroes, + * which means we're selecting the AVI DIP and we're setting its + * frequency to once. This seems to really confuse the HW and make + * things stop working (the register spec says the AVI always needs to + * be sent every VSync). So here we avoid writing to the register more + * than we need and also explicitly select the AVI DIP and explicitly + * set its frequency to every VSync. Avoiding to write it twice seems to + * be enough to solve the problem, but being defensive shouldn't hurt us + * either. 
*/ + val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; + + if (!intel_hdmi->has_hdmi_sink) { + if (!(val & VIDEO_DIP_ENABLE)) + return; + val &= ~VIDEO_DIP_ENABLE; + I915_WRITE(reg, val); + return; + } + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); } @@ -355,6 +376,23 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, static void ibx_set_infoframes(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + u32 val = I915_READ(reg); + + /* See the big comment in g4x_set_infoframes() */ + val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; + + if (!intel_hdmi->has_hdmi_sink) { + if (!(val & VIDEO_DIP_ENABLE)) + return; + val &= ~VIDEO_DIP_ENABLE; + I915_WRITE(reg, val); + return; + } + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); } @@ -362,6 +400,23 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, static void cpt_set_infoframes(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + u32 val = I915_READ(reg); + + /* See the big comment in g4x_set_infoframes() */ + val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; + + if (!intel_hdmi->has_hdmi_sink) { + if (!(val & VIDEO_DIP_ENABLE)) + return; + val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI); + I915_WRITE(reg, val); + return; + } + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); } @@ -369,6 +424,23 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, static void vlv_set_infoframes(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); + u32 val = I915_READ(reg); + + /* See the big comment in g4x_set_infoframes() */ + val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; + + if (!intel_hdmi->has_hdmi_sink) { + if (!(val & VIDEO_DIP_ENABLE)) + return; + val &= ~VIDEO_DIP_ENABLE; + I915_WRITE(reg, val); + return; + } + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); } @@ -376,6 +448,16 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, static void hsw_set_infoframes(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe); + + if (!intel_hdmi->has_hdmi_sink) { + I915_WRITE(reg, 0); + return; + } + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); } -- cgit v1.2.3 From f278d97215d3cca43aa1569b5ae712cc17e74702 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 28 May 2012 16:42:50 -0300 Subject: drm/i915: only set the HDMI port on the DIP once Not once for each InfoFrame. 
Now we have a function that allows us to do this. [danvet: Paulo clarified on irc that a later bugfix patch needs this cleanup.] Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 56 ++++++++++++++++++++++----------------- 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 2f2adc4e511c..1df1ec764a01 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -121,18 +121,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, uint32_t *data = (uint32_t *)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 val = I915_READ(VIDEO_DIP_CTL); unsigned i, len = DIP_HEADER_SIZE + frame->len; - val &= ~VIDEO_DIP_PORT_MASK; - if (intel_hdmi->sdvox_reg == SDVOB) - val |= VIDEO_DIP_PORT_B; - else if (intel_hdmi->sdvox_reg == SDVOC) - val |= VIDEO_DIP_PORT_C; - else - return; - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(frame); @@ -160,26 +151,10 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); unsigned i, len = DIP_HEADER_SIZE + frame->len; u32 val = I915_READ(reg); - val &= ~VIDEO_DIP_PORT_MASK; - switch (intel_hdmi->sdvox_reg) { - case HDMIB: - val |= VIDEO_DIP_PORT_B; - break; - case HDMIC: - val |= VIDEO_DIP_PORT_C; - break; - case HDMID: - val |= VIDEO_DIP_PORT_D; - break; - default: - return; - } - intel_wait_for_vblank(dev, intel_crtc->pipe); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ @@ -369,6 +344,20 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, return; } + val &= ~VIDEO_DIP_PORT_MASK; + switch (intel_hdmi->sdvox_reg) { + case SDVOB: + val |= VIDEO_DIP_PORT_B; + break; + case SDVOC: + val |= VIDEO_DIP_PORT_C; + break; + default: + return; + } + + I915_WRITE(reg, val); + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); } @@ -393,6 +382,23 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, return; } + val &= ~VIDEO_DIP_PORT_MASK; + switch (intel_hdmi->sdvox_reg) { + case HDMIB: + val |= VIDEO_DIP_PORT_B; + break; + case HDMIC: + val |= VIDEO_DIP_PORT_C; + break; + case HDMID: + val |= VIDEO_DIP_PORT_D; + break; + default: + return; + } + + I915_WRITE(reg, val); + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); } -- cgit v1.2.3 From 822974aea875348e69fb6b6d2078ae8372eeec66 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 28 May 2012 16:42:51 -0300 Subject: drm/i915: enable DIP before enabling each InfoFrame So the write_infoframe function can assume the DIP is on. V2: Be more defensive and add WARN(). 
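[Editor's sketch, not literal driver code: once this patch and the two before it are applied, the g4x variant of set_infoframes() follows roughly the shape below -- VIDEO_DIP_ENABLE is set exactly once, before any frame is written, and the per-frame writers only select a DIP buffer and fill it, warning if called with the DIP off. This is a condensed paraphrase for orientation; names are from i915_reg.h and intel_hdmi.c.]

    static void g4x_set_infoframes_sketch(struct drm_encoder *encoder,
                                          struct drm_display_mode *adjusted_mode)
    {
            struct drm_i915_private *dev_priv = encoder->dev->dev_private;
            struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
            u32 val = I915_READ(VIDEO_DIP_CTL);

            /* Defensive defaults: select the AVI DIP, send it every VSync. */
            val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;

            if (!intel_hdmi->has_hdmi_sink) {
                    /* DVI sink: stop transmission, but avoid rewriting the
                     * register if it is already disabled. */
                    if (!(val & VIDEO_DIP_ENABLE))
                            return;
                    I915_WRITE(VIDEO_DIP_CTL, val & ~VIDEO_DIP_ENABLE);
                    return;
            }

            /* HDMI sink: route the DIP to the port in use (gen4 reuses the
             * SDVO registers), enable transmission once, then write frames. */
            val &= ~VIDEO_DIP_PORT_MASK;
            val |= intel_hdmi->sdvox_reg == SDVOB ? VIDEO_DIP_PORT_B :
                                                    VIDEO_DIP_PORT_C;
            val |= VIDEO_DIP_ENABLE;
            I915_WRITE(VIDEO_DIP_CTL, val);

            intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
            intel_hdmi_set_spd_infoframe(encoder);
    }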
Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 1df1ec764a01..de6f4c2c82ac 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -124,11 +124,12 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, u32 val = I915_READ(VIDEO_DIP_CTL); unsigned i, len = DIP_HEADER_SIZE + frame->len; + WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(frame); val &= ~g4x_infoframe_enable(frame); - val |= VIDEO_DIP_ENABLE; I915_WRITE(VIDEO_DIP_CTL, val); @@ -155,13 +156,14 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, unsigned i, len = DIP_HEADER_SIZE + frame->len; u32 val = I915_READ(reg); + WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); + intel_wait_for_vblank(dev, intel_crtc->pipe); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(frame); val &= ~g4x_infoframe_enable(frame); - val |= VIDEO_DIP_ENABLE; I915_WRITE(reg, val); @@ -188,6 +190,8 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, unsigned i, len = DIP_HEADER_SIZE + frame->len; u32 val = I915_READ(reg); + WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); + intel_wait_for_vblank(dev, intel_crtc->pipe); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ @@ -195,13 +199,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, /* The DIP control register spec says that we need to update the AVI * infoframe without clearing its enable bit */ - if (frame->type == DIP_TYPE_AVI) - val |= VIDEO_DIP_ENABLE_AVI; - else + if (frame->type != DIP_TYPE_AVI) val &= ~g4x_infoframe_enable(frame); - val |= VIDEO_DIP_ENABLE; - I915_WRITE(reg, val); for (i = 0; i < len; i += 4) { @@ -227,13 +227,14 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, unsigned i, len = DIP_HEADER_SIZE + frame->len; u32 val = I915_READ(reg); + WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); + intel_wait_for_vblank(dev, intel_crtc->pipe); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(frame); val &= ~g4x_infoframe_enable(frame); - val |= VIDEO_DIP_ENABLE; I915_WRITE(reg, val); @@ -356,6 +357,8 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, return; } + val |= VIDEO_DIP_ENABLE; + I915_WRITE(reg, val); intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); @@ -397,6 +400,8 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, return; } + val |= VIDEO_DIP_ENABLE; + I915_WRITE(reg, val); intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); @@ -423,6 +428,11 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, return; } + /* Set both together, unset both together: see the spec. 
*/ + val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI; + + I915_WRITE(reg, val); + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); } @@ -447,6 +457,10 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, return; } + val |= VIDEO_DIP_ENABLE; + + I915_WRITE(reg, val); + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); } -- cgit v1.2.3 From 5cde2a62e8adf12b02e47cf15630e87d4ba8ad5e Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 28 May 2012 16:42:52 -0300 Subject: drm/i915: don't wait for vblank while writing InfoFrames This function is called when the pipe is disabled, so it always gets the 50ms timeout. This function is called once for each InfoFrame, so we actually get a 100ms timeout. Will be more if we add more InfoFrames. Also, the spec says we need to "wait for a VSync to ensure completion of any pending DIP transmissions", not for a VBlank. OTOH, the register documentation suggests that the DIPs are sent *during* the VSync, so shouldn't we be waiting until *after* the VSync to ensure all DIPs are sent? So this wait_for_vblank seems, besides useless, totally wrong. If we ever want to change some specific InfoFrame on-the-fly (outside of the modeset code), the code that changes the InfoFrame will have to do the waiting itself, and properly. Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index de6f4c2c82ac..614d83fb6738 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -158,8 +158,6 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); - intel_wait_for_vblank(dev, intel_crtc->pipe); - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(frame); @@ -192,8 +190,6 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); - intel_wait_for_vblank(dev, intel_crtc->pipe); - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(frame); @@ -229,8 +225,6 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); - intel_wait_for_vblank(dev, intel_crtc->pipe); - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(frame); @@ -265,8 +259,6 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, if (data_reg == 0) return; - intel_wait_for_vblank(dev, intel_crtc->pipe); - val &= ~hsw_infoframe_enable(frame); I915_WRITE(ctl_reg, val); -- cgit v1.2.3 From 0dd87d2084fc9e1f663d3a2c24f0e3f94b7f20e4 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 28 May 2012 16:42:53 -0300 Subject: drm/i915: explicitly disable the DIPs we're not using From this point on, the 'set_infoframe' functions always set the DIP registers to a known state, so anything done will always be undone at the modeset. 
Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 6 ++++++ drivers/gpu/drm/i915/intel_hdmi.c | 13 +++++++++++++ 2 files changed, 19 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0f45a18a5e41..1855ac732718 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1717,8 +1717,10 @@ #define VIDEO_DIP_PORT_C (2 << 29) #define VIDEO_DIP_PORT_D (3 << 29) #define VIDEO_DIP_PORT_MASK (3 << 29) +#define VIDEO_DIP_ENABLE_GCP (1 << 25) #define VIDEO_DIP_ENABLE_AVI (1 << 21) #define VIDEO_DIP_ENABLE_VENDOR (2 << 21) +#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) #define VIDEO_DIP_ENABLE_SPD (8 << 21) #define VIDEO_DIP_SELECT_AVI (0 << 19) #define VIDEO_DIP_SELECT_VENDOR (1 << 19) @@ -1729,7 +1731,11 @@ #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) #define VIDEO_DIP_FREQ_MASK (3 << 16) /* HSW and later: */ +#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20) +#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16) #define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12) +#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8) +#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4) #define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) /* Panel power sequencing */ diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 614d83fb6738..620b0dba9e7a 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -350,6 +350,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, } val |= VIDEO_DIP_ENABLE; + val &= ~VIDEO_DIP_ENABLE_VENDOR; I915_WRITE(reg, val); @@ -393,6 +394,8 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, } val |= VIDEO_DIP_ENABLE; + val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | + VIDEO_DIP_ENABLE_GCP); I915_WRITE(reg, val); @@ -422,6 +425,8 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, /* Set both together, unset both together: see the spec. */ val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI; + val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | + VIDEO_DIP_ENABLE_GCP); I915_WRITE(reg, val); @@ -450,6 +455,8 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, } val |= VIDEO_DIP_ENABLE; + val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | + VIDEO_DIP_ENABLE_GCP); I915_WRITE(reg, val); @@ -464,12 +471,18 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe); + u32 val = I915_READ(reg); if (!intel_hdmi->has_hdmi_sink) { I915_WRITE(reg, 0); return; } + val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW | + VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW); + + I915_WRITE(reg, val); + intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); } -- cgit v1.2.3 From 72b78c9d19ee3e69988424c499498e50eaba0859 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 28 May 2012 16:42:54 -0300 Subject: drm/i915: disable DIP while changing the port The register specification says we need to do this. V2: Only write the register if the port is enabled. 
Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 620b0dba9e7a..c858da986fdd 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -317,6 +317,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 reg = VIDEO_DIP_CTL; u32 val = I915_READ(reg); + u32 port; /* If the registers were not initialized yet, they might be zeroes, * which means we're selecting the AVI DIP and we're setting its @@ -337,18 +338,26 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, return; } - val &= ~VIDEO_DIP_PORT_MASK; switch (intel_hdmi->sdvox_reg) { case SDVOB: - val |= VIDEO_DIP_PORT_B; + port = VIDEO_DIP_PORT_B; break; case SDVOC: - val |= VIDEO_DIP_PORT_C; + port = VIDEO_DIP_PORT_C; break; default: return; } + if (port != (val & VIDEO_DIP_PORT_MASK)) { + if (val & VIDEO_DIP_ENABLE) { + val &= ~VIDEO_DIP_ENABLE; + I915_WRITE(reg, val); + } + val &= ~VIDEO_DIP_PORT_MASK; + val |= port; + } + val |= VIDEO_DIP_ENABLE; val &= ~VIDEO_DIP_ENABLE_VENDOR; @@ -366,6 +375,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); + u32 port; /* See the big comment in g4x_set_infoframes() */ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; @@ -378,21 +388,29 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, return; } - val &= ~VIDEO_DIP_PORT_MASK; switch (intel_hdmi->sdvox_reg) { case HDMIB: - val |= VIDEO_DIP_PORT_B; + port = VIDEO_DIP_PORT_B; break; case HDMIC: - val |= VIDEO_DIP_PORT_C; + port = VIDEO_DIP_PORT_C; break; case HDMID: - val |= VIDEO_DIP_PORT_D; + port = VIDEO_DIP_PORT_D; break; default: return; } + if (port != (val & VIDEO_DIP_PORT_MASK)) { + if (val & VIDEO_DIP_ENABLE) { + val &= ~VIDEO_DIP_ENABLE; + I915_WRITE(reg, val); + } + val &= ~VIDEO_DIP_PORT_MASK; + val |= port; + } + val |= VIDEO_DIP_ENABLE; val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP); -- cgit v1.2.3 From 9d32d1653db10eaba3140dd1a2f8dad51122f0b5 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 28 May 2012 16:42:55 -0300 Subject: drm/i915: don't write 0 to DIP control at HDMI init At this time, the HDMI port is enabled, and the DIP control register specification says we need to disable the port *before* disabling the DIPs. Also, while doing this we risk telling the HW to send the AVI DIPs once (not every VSync), which really seems to confuse the HW and trigger bugs where the DIPs are not sent. This code was here just to set the DIP register to a 'known state' before using it, but since now the set_infoframes functions already set the control registers to a known state, this code can go away. Also, the previous code disables *all* the DIP registers for *each* HDMI port, so we end disabling each DIP register more than once. This patch solves a problem I can reproduce on my IVB machine. When I boot it with just a single HDMI monitor, the AVI InfoFrames are not sent. With this patch, the InfoFrames are sent. Previously, I wrote a patch to 'touch the DIP registers after we enable the HDMI port' to solve this same problem, but that patch doesn't seem to be needed anymore after this patch. 
All this patch does is revert a chunk of the following commit: commit 64a8fc0145a1d0fdc25fc9367c2e6c621955fb3b Author: Jesse Barnes Date: Thu Sep 22 11:16:00 2011 +0530 drm/i915: fix ILK+ infoframe support So bugs that can be bisected to that commit may be fixed now. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=43256 Acked-by: Jesse Barnes Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index c858da986fdd..65af12ee98b1 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -816,7 +816,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; struct intel_hdmi *intel_hdmi; - int i; intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL); if (!intel_hdmi) @@ -894,30 +893,21 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) if (!HAS_PCH_SPLIT(dev)) { intel_hdmi->write_infoframe = g4x_write_infoframe; intel_hdmi->set_infoframes = g4x_set_infoframes; - I915_WRITE(VIDEO_DIP_CTL, 0); } else if (IS_VALLEYVIEW(dev)) { intel_hdmi->write_infoframe = vlv_write_infoframe; intel_hdmi->set_infoframes = vlv_set_infoframes; - for_each_pipe(i) - I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0); } else if (IS_HASWELL(dev)) { /* FIXME: Haswell has a new set of DIP frame registers, but we are * just doing the minimal required for HDMI to work at this stage. */ intel_hdmi->write_infoframe = hsw_write_infoframe; intel_hdmi->set_infoframes = hsw_set_infoframes; - for_each_pipe(i) - I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0); } else if (HAS_PCH_IBX(dev)) { intel_hdmi->write_infoframe = ibx_write_infoframe; intel_hdmi->set_infoframes = ibx_set_infoframes; - for_each_pipe(i) - I915_WRITE(TVIDEO_DIP_CTL(i), 0); } else { intel_hdmi->write_infoframe = cpt_write_infoframe; intel_hdmi->set_infoframes = cpt_set_infoframes; - for_each_pipe(i) - I915_WRITE(TVIDEO_DIP_CTL(i), 0); } if (IS_HASWELL(dev)) -- cgit v1.2.3 From b659c3dbe17714485a1756b74e92c27301571a80 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 28 May 2012 16:42:56 -0300 Subject: drm/i915: don't set SDVO_BORDER_ENABLE when we're HDMI The register that controls the HDMI port can be used to control the sDVO port. Some bits are defined only for sDVO, and SDVO_BORDER_ENABLE is one of those: HDMI ports that can't be used in sDVO mode don't even have this bit defined in their specifications. Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 65af12ee98b1..194b453b052c 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -515,7 +515,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 sdvox; - sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; + sdvox = SDVO_ENCODING_HDMI; if (!HAS_PCH_SPLIT(dev)) sdvox |= intel_hdmi->color_range; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) -- cgit v1.2.3 From c30b61109021ecbc81966573862cf98f655e3531 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 28 May 2012 16:42:58 -0300 Subject: drm/i915: remove comment about HSW HDMI DIPs HSW support is now just like all the other generations: we send AVI and SPD InfoFrames. 
There are other DIPs for HSW, but there are other DIPs for the previous generations too. For each gen, we can see which DIPs are missing by looking at the 'set_infoframes' function: we explicitly disable the DIPs we're not using. Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 194b453b052c..5cb1d100dc84 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -897,9 +897,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) intel_hdmi->write_infoframe = vlv_write_infoframe; intel_hdmi->set_infoframes = vlv_set_infoframes; } else if (IS_HASWELL(dev)) { - /* FIXME: Haswell has a new set of DIP frame registers, but we are - * just doing the minimal required for HDMI to work at this stage. - */ intel_hdmi->write_infoframe = hsw_write_infoframe; intel_hdmi->set_infoframes = hsw_set_infoframes; } else if (HAS_PCH_IBX(dev)) { -- cgit v1.2.3 From 9d9740f099f2eaf309c4c9cbc0d732507140db28 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 28 May 2012 16:43:00 -0300 Subject: drm/i915: add some barriers when changing DIPs On IVB and older, we basically have two registers: the control and the data register. We write a few consecutitve times to the control register, and we need these writes to arrive exactly in the specified order. Also, when we're changing the data register, we need to guarantee that anything written to the control register already arrived (since changing the control register can change where the data register points to). Also, we need to make sure all the writes to the data register happen exactly in the specified order, and we also *can't* read the data register during this process, since reading and/or writing it will change the place it points to. So invoke the "better safe than sorry" rule and just be careful and put barriers everywhere :) On HSW we still have a control register that we write many times, but we have many data registers. 
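Distilled into one place, the ordering this demands on the pre-HSW parts is roughly the sketch below. The dip_* helpers are hypothetical stand-ins for the driver's accessors (I915_WRITE of the DIP control/data registers, mmiowb() and a posting read), so treat it as an illustration of the constraint rather than as driver code:

#include <linux/types.h>

/* Hypothetical stand-ins, not driver code:
 * dip_write_ctl()/dip_write_data() wrap I915_WRITE of the DIP control and
 * data registers, dip_read_ctl() wraps a read of the control register,
 * and dip_barrier() wraps mmiowb(). */
void dip_write_ctl(u32 val);
void dip_write_data(u32 val);
u32 dip_read_ctl(void);
void dip_barrier(void);

/* One InfoFrame transmission with the ordering described above. */
void dip_send(u32 ctl, const u32 *payload, unsigned int dwords, u32 enable_bit)
{
	unsigned int i;

	dip_write_ctl(ctl);			/* select the DIP, reset the data offset */
	dip_barrier();				/* control write must land before any data */

	for (i = 0; i < dwords; i++)
		dip_write_data(payload[i]);	/* data offset auto-increments */
	dip_barrier();				/* all data must land before re-arming */

	dip_write_ctl(ctl | enable_bit);	/* enable the DIP */
	dip_read_ctl();				/* posting read flushes the final write */
}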
Demanded-by: Chris Wilson Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 5cb1d100dc84..b507d38faa10 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -133,16 +133,19 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, I915_WRITE(VIDEO_DIP_CTL, val); + mmiowb(); for (i = 0; i < len; i += 4) { I915_WRITE(VIDEO_DIP_DATA, *data); data++; } + mmiowb(); val |= g4x_infoframe_enable(frame); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; I915_WRITE(VIDEO_DIP_CTL, val); + POSTING_READ(VIDEO_DIP_CTL); } static void ibx_write_infoframe(struct drm_encoder *encoder, @@ -165,16 +168,19 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, I915_WRITE(reg, val); + mmiowb(); for (i = 0; i < len; i += 4) { I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; } + mmiowb(); val |= g4x_infoframe_enable(frame); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; I915_WRITE(reg, val); + POSTING_READ(reg); } static void cpt_write_infoframe(struct drm_encoder *encoder, @@ -200,16 +206,19 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, I915_WRITE(reg, val); + mmiowb(); for (i = 0; i < len; i += 4) { I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; } + mmiowb(); val |= g4x_infoframe_enable(frame); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; I915_WRITE(reg, val); + POSTING_READ(reg); } static void vlv_write_infoframe(struct drm_encoder *encoder, @@ -232,16 +241,19 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, I915_WRITE(reg, val); + mmiowb(); for (i = 0; i < len; i += 4) { I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; } + mmiowb(); val |= g4x_infoframe_enable(frame); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; I915_WRITE(reg, val); + POSTING_READ(reg); } static void hsw_write_infoframe(struct drm_encoder *encoder, @@ -262,13 +274,16 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, val &= ~hsw_infoframe_enable(frame); I915_WRITE(ctl_reg, val); + mmiowb(); for (i = 0; i < len; i += 4) { I915_WRITE(data_reg + i, *data); data++; } + mmiowb(); val |= hsw_infoframe_enable(frame); I915_WRITE(ctl_reg, val); + POSTING_READ(ctl_reg); } static void intel_set_infoframe(struct drm_encoder *encoder, @@ -335,6 +350,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, return; val &= ~VIDEO_DIP_ENABLE; I915_WRITE(reg, val); + POSTING_READ(reg); return; } @@ -353,6 +369,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, if (val & VIDEO_DIP_ENABLE) { val &= ~VIDEO_DIP_ENABLE; I915_WRITE(reg, val); + POSTING_READ(reg); } val &= ~VIDEO_DIP_PORT_MASK; val |= port; @@ -362,6 +379,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, val &= ~VIDEO_DIP_ENABLE_VENDOR; I915_WRITE(reg, val); + POSTING_READ(reg); intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); @@ -385,6 +403,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, return; val &= ~VIDEO_DIP_ENABLE; I915_WRITE(reg, val); + POSTING_READ(reg); return; } @@ -406,6 +425,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, if (val & VIDEO_DIP_ENABLE) { val &= ~VIDEO_DIP_ENABLE; I915_WRITE(reg, val); + POSTING_READ(reg); } val &= ~VIDEO_DIP_PORT_MASK; val |= port; @@ -416,6 +436,7 @@ static 
void ibx_set_infoframes(struct drm_encoder *encoder, VIDEO_DIP_ENABLE_GCP); I915_WRITE(reg, val); + POSTING_READ(reg); intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); @@ -438,6 +459,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, return; val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI); I915_WRITE(reg, val); + POSTING_READ(reg); return; } @@ -447,6 +469,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, VIDEO_DIP_ENABLE_GCP); I915_WRITE(reg, val); + POSTING_READ(reg); intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); @@ -469,6 +492,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, return; val &= ~VIDEO_DIP_ENABLE; I915_WRITE(reg, val); + POSTING_READ(reg); return; } @@ -477,6 +501,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, VIDEO_DIP_ENABLE_GCP); I915_WRITE(reg, val); + POSTING_READ(reg); intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); @@ -493,6 +518,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, if (!intel_hdmi->has_hdmi_sink) { I915_WRITE(reg, 0); + POSTING_READ(reg); return; } @@ -500,6 +526,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW); I915_WRITE(reg, val); + POSTING_READ(reg); intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); -- cgit v1.2.3 From e5153dc09c2be01670c6dce7ac6a454f23d5c2b6 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 30 May 2012 17:15:45 +0200 Subject: drm/i915: there's no cxsr on ilk Already discovered in commit 5a117db77e47e3946d1aaa7ce8deafafd9d76746 Author: Eugeni Dodonov Date: Thu Jan 5 09:34:29 2012 -0200 drm/i915: there is no pipe CxSR on ironlake but we've failed to rip out the code from the ironlake specific code. Reviewed-by: Eugeni Dodonov Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3c71850ddf20..83ae2c889ff7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4652,16 +4652,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, if (is_lvds && has_reduced_clock && i915_powersave) { I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); intel_crtc->lowfreq_avail = true; - if (HAS_PIPE_CXSR(dev)) { - DRM_DEBUG_KMS("enabling CxSR downclocking\n"); - pipeconf |= PIPECONF_CXSR_DOWNCLOCK; - } } else { I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); - if (HAS_PIPE_CXSR(dev)) { - DRM_DEBUG_KMS("disabling CxSR downclocking\n"); - pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; - } } } -- cgit v1.2.3 From 61e9653f0d9a18a0ceecbc0acb93b3297fbb8196 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 30 May 2012 14:52:26 +0200 Subject: drm/i915: reuse the sdvo tv clock adjustment in ilk mode_set Jesse extracted this nice helper in his i9xx_crtc_mode_set refactor, but we have the identical code in ironlake_ccrtc_mode_set. And that function is huge, so extracting some code full of magic numbers is always nice. Noticed while trying to get a handle on our dp clock code. 
Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 83ae2c889ff7..1d801724c1db 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4395,25 +4395,10 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, &clock, &reduced_clock); } - /* SDVO TV has fixed PLL values depend on its clock range, - this mirrors vbios setting. */ - if (is_sdvo && is_tv) { - if (adjusted_mode->clock >= 100000 - && adjusted_mode->clock < 140500) { - clock.p1 = 2; - clock.p2 = 10; - clock.n = 3; - clock.m1 = 16; - clock.m2 = 8; - } else if (adjusted_mode->clock >= 140500 - && adjusted_mode->clock <= 200000) { - clock.p1 = 1; - clock.p2 = 10; - clock.n = 6; - clock.m1 = 12; - clock.m2 = 8; - } - } + + if (is_sdvo && is_tv) + i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); + /* FDI link */ pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); -- cgit v1.2.3 From 6c982376de3f81e046e380d27079b823acadf6ad Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 24 May 2012 21:26:49 +0200 Subject: drm/i915: s/mdelay/msleep/ in the sdvo detect function A 30 ms delay is simply way too big to waste cpu cycles on. Acked-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sdvo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index a1840f44941b..ca3c6e128594 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1371,7 +1371,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) /* add 30ms delay when the output type might be TV */ if (intel_sdvo->caps.output_flags & SDVO_TV_MASK) - mdelay(30); + msleep(30); if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) return connector_status_unknown; -- cgit v1.2.3 From e36891900855b3bed5d9cc9209655e6dfa435a5f Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 25 May 2012 16:56:22 -0700 Subject: drm/i915: Dynamic Parity Detection handling On IVB hardware we are given an interrupt whenever a L3 parity error occurs in the L3 cache. The L3 cache is used by internal GPU clients only. This is a very rare occurrence (in fact to test this I need to use specially instrumented silicon). When a row in the L3 cache detects a parity error the HW generates an interrupt. The interrupt is masked in GTIMR until we get a chance to read some registers and alert userspace via a uevent. With this information userspace can use a sysfs interface (follow-up patch) to remap those rows. Way above my level of understanding, but if a given row fails, it is statistically more likely to fail again than a row which has not failed. Therefore it is desirable for an operating system to maintain a lifelong list of failing rows and always remap any bad rows on driver load. Hardware limits the number of rows that are remappable per bank/subbank, and should more than that many rows detect parity errors, software should maintain a list of the most frequent errors, and remap those rows. 
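For illustration, the userspace half of this mechanism could be a small libudev listener along the lines of the sketch below. The subsystem name and the L3_PARITY_ERROR/ROW/BANK/SUBBANK property keys follow the uevent emitted by this patch; the program itself and what it does with the values are hypothetical. A real consumer would persist the failing row and, via the sysfs interface added in the follow-up patch, push an updated remap table back to the driver.

#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libudev.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon;
	struct pollfd pfd;

	if (!udev)
		return EXIT_FAILURE;

	/* Listen for uevents on the drm subsystem. */
	mon = udev_monitor_new_from_netlink(udev, "udev");
	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
	udev_monitor_enable_receiving(mon);

	pfd.fd = udev_monitor_get_fd(mon);
	pfd.events = POLLIN;

	for (;;) {	/* runs until killed; cleanup omitted */
		struct udev_device *dev;
		const char *err;

		if (poll(&pfd, 1, -1) <= 0)
			continue;

		dev = udev_monitor_receive_device(mon);
		if (!dev)
			continue;

		err = udev_device_get_property_value(dev, "L3_PARITY_ERROR");
		if (err && !strcmp(err, "1")) {
			printf("L3 parity error: row %s bank %s subbank %s\n",
			       udev_device_get_property_value(dev, "ROW"),
			       udev_device_get_property_value(dev, "BANK"),
			       udev_device_get_property_value(dev, "SUBBANK"));
			/* A real handler would record this and reprogram the
			 * remapping registers through the l3_parity sysfs file. */
		}
		udev_device_unref(dev);
	}
}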
V2: Drop WARN_ON(IS_GEN6) (Jesse) DRM_DEBUG row/bank/subbank on errror (Jesse) Comment updates (Jesse) Reviewed-by: Jesse Barnes Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/i915_irq.c | 86 +++++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 17 ++++++++ 3 files changed, 105 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a17e31e91951..504f53ee2f8a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -816,6 +816,8 @@ typedef struct drm_i915_private { struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; + + struct work_struct parity_error_work; } drm_i915_private_t; /* Iterate over initialised rings */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5134a62ff82f..c5266355973b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -398,6 +398,86 @@ static void gen6_pm_rps_work(struct work_struct *work) mutex_unlock(&dev_priv->dev->struct_mutex); } + +/** + * ivybridge_parity_work - Workqueue called when a parity error interrupt + * occurred. + * @work: workqueue struct + * + * Doesn't actually do anything except notify userspace. As a consequence of + * this event, userspace should try to remap the bad rows since statistically + * it is likely the same row is more likely to go bad again. + */ +static void ivybridge_parity_work(struct work_struct *work) +{ + drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, + parity_error_work); + u32 error_status, row, bank, subbank; + char *parity_event[5]; + uint32_t misccpctl; + unsigned long flags; + + /* We must turn off DOP level clock gating to access the L3 registers. + * In order to prevent a get/put style interface, acquire struct mutex + * any time we access those registers. 
+ */ + mutex_lock(&dev_priv->dev->struct_mutex); + + misccpctl = I915_READ(GEN7_MISCCPCTL); + I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); + POSTING_READ(GEN7_MISCCPCTL); + + error_status = I915_READ(GEN7_L3CDERRST1); + row = GEN7_PARITY_ERROR_ROW(error_status); + bank = GEN7_PARITY_ERROR_BANK(error_status); + subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); + + I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | + GEN7_L3CDERRST1_ENABLE); + POSTING_READ(GEN7_L3CDERRST1); + + I915_WRITE(GEN7_MISCCPCTL, misccpctl); + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + + mutex_unlock(&dev_priv->dev->struct_mutex); + + parity_event[0] = "L3_PARITY_ERROR=1"; + parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); + parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); + parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); + parity_event[4] = NULL; + + kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, + KOBJ_CHANGE, parity_event); + + DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", + row, bank, subbank); + + kfree(parity_event[3]); + kfree(parity_event[2]); + kfree(parity_event[1]); +} + +void ivybridge_handle_parity_error(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long flags; + + if (!IS_IVYBRIDGE(dev)) + return; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + + queue_work(dev_priv->wq, &dev_priv->parity_error_work); +} + static void snb_gt_irq_handler(struct drm_device *dev, struct drm_i915_private *dev_priv, u32 gt_iir) @@ -417,6 +497,9 @@ static void snb_gt_irq_handler(struct drm_device *dev, DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); i915_handle_error(dev, false); } + + if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) + ivybridge_handle_parity_error(dev); } static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, @@ -1641,6 +1724,9 @@ static void ironlake_irq_preinstall(struct drm_device *dev) atomic_set(&dev_priv->irq_received, 0); + if (IS_IVYBRIDGE(dev)) + INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); + I915_WRITE(HWSTAM, 0xeffe); /* XXX hotplug from PCH */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1855ac732718..2f31df0dde48 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4094,6 +4094,23 @@ #define GEN6_RC6 3 #define GEN6_RC7 4 +#define GEN7_MISCCPCTL (0x9424) +#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0) + +/* IVYBRIDGE DPF */ +#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */ +#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14) +#define GEN7_PARITY_ERROR_VALID (1<<13) +#define GEN7_L3CDERRST1_BANK_MASK (3<<11) +#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8) +#define GEN7_PARITY_ERROR_ROW(reg) \ + ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14) +#define GEN7_PARITY_ERROR_BANK(reg) \ + ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11) +#define GEN7_PARITY_ERROR_SUBBANK(reg) \ + ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8) +#define GEN7_L3CDERRST1_ENABLE (1<<7) + #define G4X_AUD_VID_DID 0x62020 #define INTEL_AUDIO_DEVCL 0x808629FB #define INTEL_AUDIO_DEVBLC 0x80862801 -- cgit v1.2.3 From 15b9f80e008f584d1a3835bb5eba194080e4e750 Mon Sep 17 
00:00:00 2001 From: Ben Widawsky Date: Fri, 25 May 2012 16:56:23 -0700 Subject: drm/i915: enable parity error interrupts The previous patch put all the code, and handlers in place. It should now be safe to enable the parity error interrupt. The parity error must be unmasked in both the GTIMR, and the CS IMR. Unfortunately, the docs aren't clear about this; nevertheless it's the truth. Reviewed-by: Jesse Barnes Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 4 ++-- drivers/gpu/drm/i915/intel_ringbuffer.c | 14 ++++++++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c5266355973b..4a457521d76e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1889,13 +1889,13 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) DE_PIPEA_VBLANK_IVB); POSTING_READ(DEIER); - dev_priv->gt_irq_mask = ~0; + dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIMR, dev_priv->gt_irq_mask); render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | - GEN6_BLITTER_USER_INTERRUPT; + GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; I915_WRITE(GTIER, render_irqs); POSTING_READ(GTIER); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 1df1694835a1..89a5e7f89d7a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -427,6 +427,9 @@ static int init_render_ring(struct intel_ring_buffer *ring) if (INTEL_INFO(dev)->gen >= 6) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); + if (IS_IVYBRIDGE(dev)) + I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); + return ret; } @@ -814,7 +817,11 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (ring->irq_refcount++ == 0) { - I915_WRITE_IMR(ring, ~ring->irq_enable_mask); + if (IS_IVYBRIDGE(dev) && ring->id == RCS) + I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | + GEN6_RENDER_L3_PARITY_ERROR)); + else + I915_WRITE_IMR(ring, ~ring->irq_enable_mask); dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; I915_WRITE(GTIMR, dev_priv->gt_irq_mask); POSTING_READ(GTIMR); @@ -833,7 +840,10 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--ring->irq_refcount == 0) { - I915_WRITE_IMR(ring, ~0); + if (IS_IVYBRIDGE(dev) && ring->id == RCS) + I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); + else + I915_WRITE_IMR(ring, ~0); dev_priv->gt_irq_mask |= ring->irq_enable_mask; I915_WRITE(GTIMR, dev_priv->gt_irq_mask); POSTING_READ(GTIMR); -- cgit v1.2.3 From b9524a1e1c48cf461768914345ec94be6a15e710 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 25 May 2012 16:56:24 -0700 Subject: drm/i915: remap l3 on hw init If any l3 rows have been previously remapped, we must remap them after GPU reset/resume too. 
v2: Just return (no warn) on remapping init if not IVB (Jesse) Move the check of schizo userspace to i915_gem_l3_remap (Jesse) Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 3 +++ drivers/gpu/drm/i915/i915_gem.c | 34 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 3 +++ 3 files changed, 40 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 504f53ee2f8a..470c73219e6b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -656,6 +656,8 @@ typedef struct drm_i915_private { /** PPGTT used for aliasing the PPGTT with the GTT */ struct i915_hw_ppgtt *aliasing_ppgtt; + u32 *l3_remap_info; + struct shrinker inactive_shrinker; /** @@ -1309,6 +1311,7 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); int __must_check i915_gem_init(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); +void i915_gem_l3_remap(struct drm_device *dev); void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_init_ppgtt(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d2eaa008573b..1c08e0900eff 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3527,6 +3527,38 @@ i915_gem_idle(struct drm_device *dev) return 0; } +void i915_gem_l3_remap(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 misccpctl; + int i; + + if (!IS_IVYBRIDGE(dev)) + return; + + if (!dev_priv->mm.l3_remap_info) + return; + + misccpctl = I915_READ(GEN7_MISCCPCTL); + I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); + POSTING_READ(GEN7_MISCCPCTL); + + for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { + u32 remap = I915_READ(GEN7_L3LOG_BASE + i); + if (remap && remap != dev_priv->mm.l3_remap_info[i/4]) + DRM_DEBUG("0x%x was already programmed to %x\n", + GEN7_L3LOG_BASE + i, remap); + if (remap && !dev_priv->mm.l3_remap_info[i/4]) + DRM_DEBUG_DRIVER("Clearing remapped register\n"); + I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]); + } + + /* Make sure all the writes land before disabling dop clock gating */ + POSTING_READ(GEN7_L3LOG_BASE); + + I915_WRITE(GEN7_MISCCPCTL, misccpctl); +} + void i915_gem_init_swizzling(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -3616,6 +3648,8 @@ i915_gem_init_hw(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; int ret; + i915_gem_l3_remap(dev); + i915_gem_init_swizzling(dev); ret = intel_init_render_ring_buffer(dev); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2f31df0dde48..7dcc04f2143e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4111,6 +4111,9 @@ ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8) #define GEN7_L3CDERRST1_ENABLE (1<<7) +#define GEN7_L3LOG_BASE 0xB070 +#define GEN7_L3LOG_SIZE 0x80 + #define G4X_AUD_VID_DID 0x62020 #define INTEL_AUDIO_DEVCL 0x808629FB #define INTEL_AUDIO_DEVBLC 0x80862801 -- cgit v1.2.3 From 84bc758124127a5b16b30f9a8bf5f1a981affc6b Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 25 May 2012 16:56:25 -0700 Subject: drm/i915: l3 parity sysfs interface Dumb binary interfaces which allow root-only updates of the cache remapping registers. 
As mentioned in a previous patch, software using this interface needs to know about HW limits, and other programming considerations as the kernel interface does no checking for these things on the root-only interface. v1: Drop extra posting reads (Chris) Return negative values in the sysfs interfaces on errors (Chris) v2: Return -EINVAL for offset % 4 (Jesse) Move schizo userspace check out (Jesse) Cleaner sysfs item initializers (Daniel) Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_sysfs.c | 121 +++++++++++++++++++++++++++++++++++++- 1 file changed, 119 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 79f83445afa0..c2013273a4c6 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -29,6 +29,7 @@ #include #include #include +#include "intel_drv.h" #include "i915_drv.h" static u32 calc_residency(struct drm_device *dev, const u32 reg) @@ -92,20 +93,136 @@ static struct attribute_group rc6_attr_group = { .attrs = rc6_attrs }; +static int l3_access_valid(struct drm_device *dev, loff_t offset) +{ + if (!IS_IVYBRIDGE(dev)) + return -EPERM; + + if (offset % 4 != 0) + return -EINVAL; + + if (offset >= GEN7_L3LOG_SIZE) + return -ENXIO; + + return 0; +} + +static ssize_t +i915_l3_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t offset, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); + struct drm_device *drm_dev = dminor->dev; + struct drm_i915_private *dev_priv = drm_dev->dev_private; + uint32_t misccpctl; + int i, ret; + + ret = l3_access_valid(drm_dev, offset); + if (ret) + return ret; + + ret = i915_mutex_lock_interruptible(drm_dev); + if (ret) + return ret; + + misccpctl = I915_READ(GEN7_MISCCPCTL); + I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); + + for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4) + *((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i); + + I915_WRITE(GEN7_MISCCPCTL, misccpctl); + + mutex_unlock(&drm_dev->struct_mutex); + + return i - offset; +} + +static ssize_t +i915_l3_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t offset, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); + struct drm_device *drm_dev = dminor->dev; + struct drm_i915_private *dev_priv = drm_dev->dev_private; + u32 *temp = NULL; /* Just here to make handling failures easy */ + int ret; + + ret = l3_access_valid(drm_dev, offset); + if (ret) + return ret; + + ret = i915_mutex_lock_interruptible(drm_dev); + if (ret) + return ret; + + if (!dev_priv->mm.l3_remap_info) { + temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); + if (!temp) { + mutex_unlock(&drm_dev->struct_mutex); + return -ENOMEM; + } + } + + ret = i915_gpu_idle(drm_dev); + if (ret) { + kfree(temp); + mutex_unlock(&drm_dev->struct_mutex); + return ret; + } + + /* TODO: Ideally we really want a GPU reset here to make sure errors + * aren't propagated. Since I cannot find a stable way to reset the GPU + * at this point it is left as a TODO. 
+ */ + if (temp) + dev_priv->mm.l3_remap_info = temp; + + memcpy(dev_priv->mm.l3_remap_info + (offset/4), + buf + (offset/4), + count); + + i915_gem_l3_remap(drm_dev); + + mutex_unlock(&drm_dev->struct_mutex); + + return count; +} + +static struct bin_attribute dpf_attrs = { + .attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)}, + .size = GEN7_L3LOG_SIZE, + .read = i915_l3_read, + .write = i915_l3_write, + .mmap = NULL +}; + void i915_setup_sysfs(struct drm_device *dev) { int ret; - /* ILK doesn't have any residency information */ + /* ILK and below don't yet have relevant sysfs files */ if (INTEL_INFO(dev)->gen < 6) return; ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group); if (ret) - DRM_ERROR("sysfs setup failed\n"); + DRM_ERROR("RC6 residency sysfs setup failed\n"); + + if (!IS_IVYBRIDGE(dev)) + return; + + ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); + if (ret) + DRM_ERROR("l3 parity sysfs setup failed\n"); } void i915_teardown_sysfs(struct drm_device *dev) { + device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); } -- cgit v1.2.3 From d2ba8470ccfc90e0966c802632f8bb552f11143a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 31 May 2012 14:57:41 +0200 Subject: drm/i915: ivybridge_handle_parity_error should be static Notice by Fengguang Wu's automatic sparse checker. Reported-by: Fengguang Wu Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4a457521d76e..53356f44aba3 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -462,7 +462,7 @@ static void ivybridge_parity_work(struct work_struct *work) kfree(parity_event[1]); } -void ivybridge_handle_parity_error(struct drm_device *dev) +static void ivybridge_handle_parity_error(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long flags; -- cgit v1.2.3 From 98fd81cd64674545a30a4f95388f086a626d37d2 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 31 May 2012 14:57:42 +0200 Subject: drm/i915: initialize the parity work only once This fixes an (albeit really hard to hit) race resulting in an oops: - The parity work get scheduled. - We re-init the irq state and call INIT_WORK again. - The workqueue code tries to run the work item and stumbles over a work item that should be on it's runlist. Also initiliaze the work item unconditionally like all the others, it's simpler. 
Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 53356f44aba3..6553dcc2ca79 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1723,10 +1723,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev) atomic_set(&dev_priv->irq_received, 0); - - if (IS_IVYBRIDGE(dev)) - INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); - I915_WRITE(HWSTAM, 0xeffe); /* XXX hotplug from PCH */ @@ -2647,6 +2643,7 @@ void intel_irq_init(struct drm_device *dev) INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->error_work, i915_error_work_func); INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); + INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ -- cgit v1.2.3 From 112abd291db7d47974f166e742104d761bc76977 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 31 May 2012 14:57:43 +0200 Subject: drm/i915: simplify sysfs setup code Positively checking for the required feature/gen is simpler than build a cascade of negative "we need to bail" checks. And the later won't scale if we add more stuff that doesn't fit in nicely. Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_sysfs.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index c2013273a4c6..2f5388af8df9 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -205,20 +205,18 @@ void i915_setup_sysfs(struct drm_device *dev) { int ret; - /* ILK and below don't yet have relevant sysfs files */ - if (INTEL_INFO(dev)->gen < 6) - return; - - ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group); - if (ret) - DRM_ERROR("RC6 residency sysfs setup failed\n"); - - if (!IS_IVYBRIDGE(dev)) - return; + if (INTEL_INFO(dev)->gen >= 6) { + ret = sysfs_merge_group(&dev->primary->kdev.kobj, + &rc6_attr_group); + if (ret) + DRM_ERROR("RC6 residency sysfs setup failed\n"); + } - ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); - if (ret) - DRM_ERROR("l3 parity sysfs setup failed\n"); + if (IS_IVYBRIDGE(dev)) { + ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); + if (ret) + DRM_ERROR("l3 parity sysfs setup failed\n"); + } } void i915_teardown_sysfs(struct drm_device *dev) -- cgit v1.2.3 From 493a708179469e3978e50f59902e9d47b6f3dabd Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 30 May 2012 12:31:56 +0200 Subject: drm/i915: clarify IBX dp workaround Instead of checking for !CPT, check for IBX to make it clearer that this is a IBX-specific workaround. No functional change because we smash the PPT PCH into the HAS_PCH_CPT check and atm DP isn't enabled on the haswell LPT PCH yet. See Bspec Vol 3, Part 3, Section 4.[3-5].1 about DP[BCD], bit 30 for details of this workaround. 
Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index c71e7890e6f6..ade98e0bca84 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1914,7 +1914,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) DP |= DP_LINK_TRAIN_OFF; } - if (!HAS_PCH_CPT(dev) && + if (HAS_PCH_IBX(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { struct drm_crtc *crtc = intel_dp->base.base.crtc; -- cgit v1.2.3 From 30dfebf34b9930277d83b25ec740510007cc4c6d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 1 Jun 2012 15:21:23 +0200 Subject: drm/i915: extract object active state flushing code Both busy_ioctl and the new wait_ioct need to do the same dance (or at least should). Some slight changes: - busy_ioctl now unconditionally checks for olr. Before emitting a require flush would have prevent the olr check and hence required a second call to the busy ioctl to really emit the request. - the timeout wait now also retires request. Not really required for abi-reasons, but makes a notch more sense imo. I've tested this by pimping the i-g-t test some more and also checking the polling behviour of the wait_rendering_timeout ioctl versus what busy_ioctl returns. v2: Too many people complained about unplug, new color is flush_active. v3: Kill the comment about the unplug moniker. v4: s/un-active/inactive/ Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 61 ++++++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a20ac438b8ef..af67803e635f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2029,6 +2029,31 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) return 0; } +/** + * Ensures that an object will eventually get non-busy by flushing any required + * write domains, emitting any outstanding lazy request and retiring and + * completed requests. + */ +static int +i915_gem_object_flush_active(struct drm_i915_gem_object *obj) +{ + int ret; + + if (obj->active) { + ret = i915_gem_object_flush_gpu_write_domain(obj); + if (ret) + return ret; + + ret = i915_gem_check_olr(obj->ring, + obj->last_rendering_seqno); + if (ret) + return ret; + i915_gem_retire_requests_ring(obj->ring); + } + + return 0; +} + /** * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT * @DRM_IOCTL_ARGS: standard ioctl arguments @@ -2073,11 +2098,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) return -ENOENT; } - /* Need to make sure the object is flushed first. This non-obvious - * flush is required to enforce that (active && !olr) == no wait - * necessary. - */ - ret = i915_gem_object_flush_gpu_write_domain(obj); + /* Need to make sure the object gets inactive eventually. 
*/ + ret = i915_gem_object_flush_active(obj); if (ret) goto out; @@ -2089,10 +2111,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) if (seqno == 0) goto out; - ret = i915_gem_check_olr(ring, seqno); - if (ret) - goto out; - /* Do this after OLR check to make sure we make forward progress polling * on this IOCTL with a 0 timeout (like busy ioctl) */ @@ -3330,30 +3348,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * become non-busy without any further actions, therefore emit any * necessary flushes here. */ - args->busy = obj->active; - if (args->busy) { - /* Unconditionally flush objects, even when the gpu still uses this - * object. Userspace calling this function indicates that it wants to - * use this buffer rather sooner than later, so issuing the required - * flush earlier is beneficial. - */ - if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { - ret = i915_gem_flush_ring(obj->ring, - 0, obj->base.write_domain); - } else { - ret = i915_gem_check_olr(obj->ring, - obj->last_rendering_seqno); - } + ret = i915_gem_object_flush_active(obj); - /* Update the active list for the hardware's current position. - * Otherwise this only updates on a delayed timer or when irqs - * are actually unmasked, and our working set ends up being - * larger than required. - */ - i915_gem_retire_requests_ring(obj->ring); - - args->busy = obj->active; - } + args->busy = obj->active; drm_gem_object_unreference(&obj->base); unlock: -- cgit v1.2.3 From 94bf2cedbc22f8952ebbbaa085620d7d0328fced Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 4 Jun 2012 18:39:19 +0200 Subject: drm/i915: compute the target_clock for edp directly ... instead of abusing mode->clock by storing it in there - we shouldn't touch that one at all. This patch is the first prep step to constify the mode argument of the intel_dp_mode_fixup function. The next patch will stop us from modifying mode->clock. Reviewed-by: Chris Wilson Reviewed-by: Jesse Barnes Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 16 ++++++++-------- drivers/gpu/drm/i915/intel_dp.c | 12 ++++++++++++ drivers/gpu/drm/i915/intel_drv.h | 2 ++ 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9f5148acf73c..0161d947ab81 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4416,16 +4416,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, /* CPU eDP doesn't require FDI link, so just set DP M/N according to current link config */ if (is_cpu_edp) { - target_clock = mode->clock; intel_edp_link_config(edp_encoder, &lane, &link_bw); } else { - /* [e]DP over FDI requires target mode clock - instead of link clock */ - if (is_dp) - target_clock = mode->clock; - else - target_clock = adjusted_mode->clock; - /* FDI is a binary signal running at ~2.7GHz, encoding * each output octet as 10 bits. The actual frequency * is stored as a divider into a 100MHz clock, and the @@ -4436,6 +4428,14 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; } + /* [e]DP over FDI requires target mode clock instead of link clock. 
*/ + if (edp_encoder) + target_clock = intel_edp_target_clock(edp_encoder, mode); + else if (is_dp) + target_clock = mode->clock; + else + target_clock = adjusted_mode->clock; + /* determine panel color depth */ temp = I915_READ(PIPECONF(pipe)); temp &= ~PIPE_BPC_MASK; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ade98e0bca84..ae7c62340129 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -152,6 +152,18 @@ intel_edp_link_config(struct intel_encoder *intel_encoder, *link_bw = 270000; } +int +intel_edp_target_clock(struct intel_encoder *intel_encoder, + struct drm_display_mode *mode) +{ + struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); + + if (intel_dp->panel_fixed_mode) + return intel_dp->panel_fixed_mode->clock; + else + return mode->clock; +} + static int intel_dp_max_lane_count(struct intel_dp *intel_dp) { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 39d7b07c1e8b..6a2ae30ee519 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -359,6 +359,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); extern bool intel_dpd_is_edp(struct drm_device *dev); extern void intel_edp_link_config(struct intel_encoder *, int *, int *); +extern int intel_edp_target_clock(struct intel_encoder *, + struct drm_display_mode *mode); extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); extern int intel_plane_init(struct drm_device *dev, enum pipe pipe); extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, -- cgit v1.2.3 From 71244653a8fb0f46bc12ae421f1d5f72af6a75da Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 4 Jun 2012 18:39:20 +0200 Subject: drm/i915: adjusted_mode->clock in the dp mode_fixup ... instead of changing mode->clock, which we should leave as-is. After the previous patch we only touch that if it's a panel, and then adjusted mode->clock equals adjusted_mode->clock. Outside of intel_dp.c we only use ajusted_mode->clock in the mode_set functions. Within intel_dp.c we only use it to calculate the dp dithering and link bw parameters, so that's the only thing we need to fix up. As a temporary ugliness (until the cleanup in the next patch) we pass the adjusted_mode into dp_dither for both parameters (because that one still looks at mode->clock). Note that we do overwrite adjusted_mode->clock with the selected dp link clock, but that only happens after we've calculated everything we need based on the dotclock of the adjusted output configuration. Outside of intel_dp.c only intel_display.c uses adjusted_mode->clock, and that stays the same after this patch (still equals the selected dp link clock). intel_display.c also needs the actual dotclock (as target_clock), but that has been fixed up in the previous patch. v2: Adjust the debug message to also use adjusted_mode->clock. 
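As a back-of-the-envelope illustration of the bandwidth check these clocks feed into (this is the standard DP 8b/10b arithmetic, not a quote of the driver's helpers; all clocks are in the driver's kHz-based units):

/* Illustration only: whether a mode fits a DP link without dithering.
 * The function name and exact rounding are assumptions, not driver code. */
static int dp_mode_fits_link(int dotclock, int bpp, int link_clock, int lanes)
{
	int mode_rate = dotclock * bpp / 8;		/* payload bytes per unit time */
	int link_rate = link_clock * lanes * 8 / 10;	/* 8b/10b channel coding overhead */

	return mode_rate <= link_rate;
}

/* e.g. a 1920x1080@60 mode (dotclock 148500) at 24bpp needs 445500, while
 * 4 lanes at 2.7 GHz (link_clock 270000) provide 864000, so the mode fits
 * without forcing 6bpc dithering. */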
Reviewed-by: Chris Wilson Reviewed-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ae7c62340129..90cb39e89e61 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -710,11 +710,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, mode, adjusted_mode); - /* - * the mode->clock is used to calculate the Data&Link M/N - * of the pipe. For the eDP the fixed clock should be used. - */ - mode->clock = intel_dp->panel_fixed_mode->clock; } if (mode->flags & DRM_MODE_FLAG_DBLCLK) @@ -722,13 +717,13 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, DRM_DEBUG_KMS("DP link computation with max lane count %i " "max bw %02x pixel clock %iKHz\n", - max_lane_count, bws[max_clock], mode->clock); + max_lane_count, bws[max_clock], adjusted_mode->clock); - if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode)) + if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, adjusted_mode)) return false; bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; - mode_rate = intel_dp_link_required(mode->clock, bpp); + mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { for (clock = 0; clock <= max_clock; clock++) { -- cgit v1.2.3 From cb1793ce921c7e073f9055847a5bc868ab84244c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 4 Jun 2012 18:39:21 +0200 Subject: drm/i915: don't chnage the original mode in dp_mode_fixup We should only frob adjusted_mode. This is in preparation of a massive patch by Laurent Pinchart to make the mode argument const. After the previous two prep patches the only thing left is to clean up things a bit. I've opted to pass in an adjust_mode param to dp_adjust_dithering because that way we can be sure to avoid duplicating this logic between mode_valid and mode_fixup - which was the cause behind a dp link bw calculation bug in the past. Also mark the mode argument of pch_panel_fitting const. v2: Split up the mode->clock => adjusted_mode->clock change, as suggested by Chris Wilson. 
Reported-by: Laurent Pinchart Reviewed-by: Chris Wilson Reviewed-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 12 ++++++------ drivers/gpu/drm/i915/intel_drv.h | 2 +- drivers/gpu/drm/i915/intel_panel.c | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 90cb39e89e61..6538c46fe959 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -234,7 +234,7 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes) static bool intel_dp_adjust_dithering(struct intel_dp *intel_dp, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) + bool adjust_mode) { int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); int max_lanes = intel_dp_max_lane_count(intel_dp); @@ -248,8 +248,8 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp, if (mode_rate > max_rate) return false; - if (adjusted_mode) - adjusted_mode->private_flags + if (adjust_mode) + mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC; return true; @@ -272,7 +272,7 @@ intel_dp_mode_valid(struct drm_connector *connector, return MODE_PANEL; } - if (!intel_dp_adjust_dithering(intel_dp, mode, NULL)) + if (!intel_dp_adjust_dithering(intel_dp, mode, false)) return MODE_CLOCK_HIGH; if (mode->clock < 10000) @@ -712,14 +712,14 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, mode, adjusted_mode); } - if (mode->flags & DRM_MODE_FLAG_DBLCLK) + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) return false; DRM_DEBUG_KMS("DP link computation with max lane count %i " "max bw %02x pixel clock %iKHz\n", max_lane_count, bws[max_clock], adjusted_mode->clock); - if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, adjusted_mode)) + if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true)) return false; bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 6a2ae30ee519..c35edd7ca84d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -373,7 +373,7 @@ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); extern void intel_pch_panel_fitting(struct drm_device *dev, int fitting_mode, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); extern u32 intel_panel_get_max_backlight(struct drm_device *dev); extern u32 intel_panel_get_backlight(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 2a1625d84a69..7180cc828f94 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -56,7 +56,7 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, void intel_pch_panel_fitting(struct drm_device *dev, int fitting_mode, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_i915_private *dev_priv = dev->dev_private; -- cgit v1.2.3 From 1523c310b3ed964b71a8db16f70c3bc21cc0642e Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 25 May 2012 12:34:54 -0700 Subject: drm/i915: add min freq control to debugfs This makes for easier benchmarking and testing. One can set a fixed frequency by setting min and max to the same value. 
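For illustration, a minimal userspace sketch that pins the GPU to one frequency by writing the same value to both files might look like the following (the debugfs mount point and DRM minor number are assumptions, and the value is in MHz, which the handlers divide by 50):

#include <stdio.h>

/* Sketch only: pin the GPU to a fixed frequency (in MHz) by writing the
 * same value to the assumed debugfs paths for the max and min controls. */
static int pin_gpu_freq(int mhz)
{
	const char *files[] = {
		"/sys/kernel/debug/dri/0/i915_max_freq",	/* assumed path */
		"/sys/kernel/debug/dri/0/i915_min_freq",	/* assumed path */
	};
	int i;

	for (i = 0; i < 2; i++) {
		FILE *f = fopen(files[i], "w");
		if (!f)
			return -1;
		fprintf(f, "%d", mhz);
		fclose(f);
	}
	return 0;
}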
v2: fix whitespace & comment (Eugeni) Signed-off-by: Jesse Barnes Reviewed-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 66 +++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 5363e9c66c27..4fa00fcfbc96 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1764,6 +1764,64 @@ static const struct file_operations i915_max_freq_fops = { .llseek = default_llseek, }; +static ssize_t +i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max, + loff_t *ppos) +{ + struct drm_device *dev = filp->private_data; + drm_i915_private_t *dev_priv = dev->dev_private; + char buf[80]; + int len; + + len = snprintf(buf, sizeof(buf), + "min freq: %d\n", dev_priv->min_delay * 50); + + if (len > sizeof(buf)) + len = sizeof(buf); + + return simple_read_from_buffer(ubuf, max, ppos, buf, len); +} + +static ssize_t +i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, + loff_t *ppos) +{ + struct drm_device *dev = filp->private_data; + struct drm_i915_private *dev_priv = dev->dev_private; + char buf[20]; + int val = 1; + + if (cnt > 0) { + if (cnt > sizeof(buf) - 1) + return -EINVAL; + + if (copy_from_user(buf, ubuf, cnt)) + return -EFAULT; + buf[cnt] = 0; + + val = simple_strtoul(buf, NULL, 0); + } + + DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); + + /* + * Turbo will still be enabled, but won't go below the set value. + */ + dev_priv->min_delay = val / 50; + + gen6_set_rps(dev, val / 50); + + return cnt; +} + +static const struct file_operations i915_min_freq_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = i915_min_freq_read, + .write = i915_min_freq_write, + .llseek = default_llseek, +}; + static ssize_t i915_cache_sharing_read(struct file *filp, char __user *ubuf, @@ -1996,6 +2054,12 @@ int i915_debugfs_init(struct drm_minor *minor) if (ret) return ret; + ret = i915_debugfs_create(minor->debugfs_root, minor, + "i915_min_freq", + &i915_min_freq_fops); + if (ret) + return ret; + ret = i915_debugfs_create(minor->debugfs_root, minor, "i915_cache_sharing", &i915_cache_sharing_fops); @@ -2028,6 +2092,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor) 1, minor); drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, 1, minor); + drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops, + 1, minor); drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, 1, minor); drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops, -- cgit v1.2.3 From 7a87c289778c5ee7ef79be5fba27e081383a30f3 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Jun 2012 11:03:39 +0200 Subject: drm/i915: implement IBX hdmi transcoder select workaround Bspec Vol 3, Part 3, Section 3.8.1.1, bit 30: "[DevIBX] Writing to this bit only takes effect when port is enabled. Due to hardware issue it is required that this bit be cleared when port is disabled. To clear this bit software must temporarily enable this port on transcoder A." Unfortunately the public Bspec misses totally out on the same language for HDMIB. Internal Bspec also mentions that one of the bad side-effects is that DPx can fail to light up on transcoder A if HDMIx is disabled but using transcoder B. I've found this while reviewing Bsepc. We already implement the same workaround for the DP ports. 
Also replace a magic 1 with PIPE_B I've found while looking through the code. v2: Implement suggestions from Chris Wilson: - add pipe variable to cut down on code noise - write the reg value twice to w/a hw issues (Bspec is unclear on which bit actually require the write twice stuff, but better be paranoid about it) - untangle the if logic Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index b507d38faa10..69637db9af1f 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -569,7 +569,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, if (HAS_PCH_CPT(dev)) sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); - else if (intel_crtc->pipe == 1) + else if (intel_crtc->pipe == PIPE_B) sdvox |= SDVO_PIPE_B_SELECT; I915_WRITE(intel_hdmi->sdvox_reg, sdvox); @@ -591,6 +591,36 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) temp = I915_READ(intel_hdmi->sdvox_reg); + /* HW workaround for IBX, we need to move the port to transcoder A + * before disabling it. */ + if (HAS_PCH_IBX(dev)) { + struct drm_crtc *crtc = encoder->crtc; + int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; + + if (mode != DRM_MODE_DPMS_ON) { + if (temp & SDVO_PIPE_B_SELECT) { + temp &= ~SDVO_PIPE_B_SELECT; + I915_WRITE(intel_hdmi->sdvox_reg, temp); + POSTING_READ(intel_hdmi->sdvox_reg); + + /* Again we need to write this twice. */ + I915_WRITE(intel_hdmi->sdvox_reg, temp); + POSTING_READ(intel_hdmi->sdvox_reg); + + /* Transcoder selection bits only update + * effectively on vblank. */ + if (crtc) + intel_wait_for_vblank(dev, pipe); + else + msleep(50); + } + } else { + /* Restore the transcoder select bit. */ + if (pipe == PIPE_B) + enable_bits |= SDVO_PIPE_B_SELECT; + } + } + /* HW workaround, need to toggle enable bit off and on for 12bpc, but * we do this anyway which shows more stable in testing. */ -- cgit v1.2.3 From de9a35abb3b343a25065449234e47a76c4f3454a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Jun 2012 11:03:40 +0200 Subject: drm/i915: assert that the IBX port transcoder select w/a is implemented Let's be a bit more paranoid here. 
Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0161d947ab81..5e760319c0f2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1232,6 +1232,9 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); + + WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT), + "IBX PCH dp port still using transcoder B\n"); } static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, @@ -1241,6 +1244,9 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, WARN(hdmi_pipe_enabled(dev_priv, val, pipe), "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); + + WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT), + "IBX PCH hdmi port still using transcoder B\n"); } static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, -- cgit v1.2.3 From eac1f14fd1e7243aa782ef85f2a217e0c3a709ad Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Tue, 5 Jun 2012 15:24:24 -0700 Subject: drm/i915: Infinite timeout for wait ioctl Change the ns_timeout parameter of the wait ioctl to a signed value. Doing this allows the kernel to provide an infinite wait when a timeout of less than 0 is provided. This mimics select/poll. Initially the parameter was meant to match up with the GL spec 1:1, but after being made aware of how much 2^64 - 1 nanoseconds actually is, I do not think anyone will ever notice the loss of 1 bit. The infinite timeout on waiting is similar to the existing i915 userspace interface with the exception that struct_mutex is dropped while doing the wait in this ioctl.
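For illustration only (not part of the patch), a userspace caller could then request an unbounded wait by passing a negative timeout; the header and ioctl number below are the standard i915/libdrm ones, assumed to be available on the build host:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Sketch: block until all rendering to the given GEM buffer has completed.
 * fd is an open DRM device fd, handle a GEM buffer object handle. */
static int gem_wait_forever(int fd, uint32_t handle)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.flags = 0,
		.timeout_ns = -1,	/* < 0 now means wait without a timeout */
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
}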
Cc: Chris Wilson Signed-off-by: Ben Widawsky Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 15 ++++++++++----- include/drm/i915_drm.h | 2 +- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index af67803e635f..deaa0d4bb456 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2082,11 +2082,14 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) struct drm_i915_gem_wait *args = data; struct drm_i915_gem_object *obj; struct intel_ring_buffer *ring = NULL; - struct timespec timeout; + struct timespec timeout_stack, *timeout = NULL; u32 seqno = 0; int ret = 0; - timeout = ns_to_timespec(args->timeout_ns); + if (args->timeout_ns >= 0) { + timeout_stack = ns_to_timespec(args->timeout_ns); + timeout = &timeout_stack; + } ret = i915_mutex_lock_interruptible(dev); if (ret) @@ -2122,9 +2125,11 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); - ret = __wait_seqno(ring, seqno, true, &timeout); - WARN_ON(!timespec_valid(&timeout)); - args->timeout_ns = timespec_to_ns(&timeout); + ret = __wait_seqno(ring, seqno, true, timeout); + if (timeout) { + WARN_ON(!timespec_valid(timeout)); + args->timeout_ns = timespec_to_ns(timeout); + } return ret; out: diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index bab174334da5..aae346e7f635 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -893,7 +893,7 @@ struct drm_i915_gem_wait { __u32 bo_handle; __u32 flags; /** Number of nanoseconds to wait, Returns time remaining. */ - __u64 timeout_ns; + __s64 timeout_ns; }; #endif /* _I915_DRM_H_ */ -- cgit v1.2.3 From 172cf15d18889313bf2c3bfb81fcea08369274ef Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Tue, 5 Jun 2012 15:24:25 -0700 Subject: drm/i915: Add wait render timeout get param Signed-off-by: Ben Widawsky Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 3 +++ include/drm/i915_drm.h | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 262a74d1f852..97a5a5857f5b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1006,6 +1006,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_ALIASING_PPGTT: value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; break; + case I915_PARAM_HAS_WAIT_TIMEOUT: + value = 1; + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index aae346e7f635..5c28ee1d1911 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -300,6 +300,7 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_HAS_GEN7_SOL_RESET 16 #define I915_PARAM_HAS_LLC 17 #define I915_PARAM_HAS_ALIASING_PPGTT 18 +#define I915_PARAM_HAS_WAIT_TIMEOUT 19 typedef struct drm_i915_getparam { int param; -- cgit v1.2.3 From afba018898ae54b498e82b3cd4d2b61c74032c90 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 12 Jun 2012 16:36:45 +0200 Subject: drm/i915: ensure HDMI port is disabled inside set_infoframes This function is supposed to be used at mode set time, so prevent against future mistakes by adding a WARN(). 
Based on a patch by Paulo Zanoni, with the check extracted into a little assert_hdmi_port_disabled helper added to make things self documenting and move the assert stuff out of line. [fixed up spelling goof-up while applying.] Cc: Paulo Zanoni Reviewed-by: Eugeni Dodonov Reviewed-by: Paulo Zanoni Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_hdmi.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 69637db9af1f..5b2c88ca6edb 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -37,6 +37,19 @@ #include "i915_drm.h" #include "i915_drv.h" +static void +assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) +{ + struct drm_device *dev = intel_hdmi->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t enabled_bits; + + enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; + + WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits, + "HDMI port enabled, expecting disabled\n"); +} + struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) { return container_of(encoder, struct intel_hdmi, base.base); @@ -334,6 +347,8 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, u32 val = I915_READ(reg); u32 port; + assert_hdmi_port_disabled(intel_hdmi); + /* If the registers were not initialized yet, they might be zeroes, * which means we're selecting the AVI DIP and we're setting its * frequency to once. This seems to really confuse the HW and make @@ -395,6 +410,8 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, u32 val = I915_READ(reg); u32 port; + assert_hdmi_port_disabled(intel_hdmi); + /* See the big comment in g4x_set_infoframes() */ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; @@ -451,6 +468,8 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); + assert_hdmi_port_disabled(intel_hdmi); + /* See the big comment in g4x_set_infoframes() */ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; @@ -484,6 +503,8 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); + assert_hdmi_port_disabled(intel_hdmi); + /* See the big comment in g4x_set_infoframes() */ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC; @@ -516,6 +537,8 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); + assert_hdmi_port_disabled(intel_hdmi); + if (!intel_hdmi->has_hdmi_sink) { I915_WRITE(reg, 0); POSTING_READ(reg); -- cgit v1.2.3 From 534b5a5341cb7e16a98d44623d8fce9464ebf22c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Jun 2012 10:07:08 +0200 Subject: drm/i915: pnv has a backlight polarity control bit, too We already correctly ignore bit0 on gen < 4, now we also know why ;-) I've decided that losing that single bit of precision isn't worth the trouble to sprinkle IS_PINEVIEW checks all over the backlight control code - that code is way too fragile imo. 
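Purely as an illustration of what per-platform handling would have looked like had we bothered (the patch itself only adds the register bits), a Pineview-aware duty-cycle read might be:

/* Illustrative sketch only: mask off the PNV polarity bit when reading the
 * duty cycle; the driver deliberately keeps using the plain gen2/gen3 mask. */
static u32 blc_duty_cycle(struct drm_device *dev, u32 blc_pwm_ctl)
{
	u32 mask = IS_PINEVIEW(dev) ? BACKLIGHT_DUTY_CYCLE_MASK_PNV :
				      BACKLIGHT_DUTY_CYCLE_MASK;

	return blc_pwm_ctl & mask;
}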
Reviewed-by: Eugeni Dodonov Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7dcc04f2143e..20244b971fc6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1828,6 +1828,8 @@ */ #define BACKLIGHT_DUTY_CYCLE_SHIFT (0) #define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) +#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) +#define BLM_POLARITY_PNV (1 << 0) /* pnv only */ #define BLC_HIST_CTL 0x61260 -- cgit v1.2.3 From 7cf4160148136deb31ee5f2802857dd935a38529 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Jun 2012 10:07:09 +0200 Subject: drm/i915: clear up backlight #define confusion on gen4+ - Regroup definitions for BLC_PWM_CTL so that they're all together and and ordered according to the bitfields. - Add all missing definitions for BLC_PWM_CTL2. - Use the BLM_ (for backlight modulation) prefix consistently. - Note that combination mode (i.e. also taking the legacy backlight control value from pci config space into account) is gen4 only. - Move the new registers for PCH-split machines up, they're an almost match for the gen4 defitions. Prefix the special PCH-only bits with BLM_PCH_. Also add the pipe C select bit for ivb. - Rip out the second pair of PCH polarity definitions - they're only valid on early (pre-production) ilk silicon. - Adapt the existing code to use the new definitions. This has the nice benefit of killing a magic (1 << 30) left behind be Jesse Barnes. No functional changes in this patch. Reviewed-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 55 +++++++++++++++++++++++------------- drivers/gpu/drm/i915/intel_display.c | 4 +-- drivers/gpu/drm/i915/intel_lvds.c | 12 ++++---- 3 files changed, 43 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 20244b971fc6..c4b1a2b45bb4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1807,18 +1807,35 @@ #define PFIT_AUTO_RATIOS 0x61238 /* Backlight control */ -#define BLC_PWM_CTL 0x61254 -#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) #define BLC_PWM_CTL2 0x61250 /* 965+ only */ -#define BLM_COMBINATION_MODE (1 << 30) +#define BLM_PWM_ENABLE (1 << 31) +#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ +#define BLM_PIPE_SELECT (1 << 29) +#define BLM_PIPE_SELECT_IVB (3 << 29) +#define BLM_PIPE_A (0 << 29) +#define BLM_PIPE_B (1 << 29) +#define BLM_PIPE_C (2 << 29) /* ivb + */ +#define BLM_PIPE(pipe) ((pipe) << 29) +#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */ +#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26) +#define BLM_PHASE_IN_ENABLE (1 << 25) +#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24) +#define BLM_PHASE_IN_TIME_BASE_SHIFT (16) +#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16) +#define BLM_PHASE_IN_COUNT_SHIFT (8) +#define BLM_PHASE_IN_COUNT_MASK (0xff << 8) +#define BLM_PHASE_IN_INCR_SHIFT (0) +#define BLM_PHASE_IN_INCR_MASK (0xff << 0) +#define BLC_PWM_CTL 0x61254 /* * This is the most significant 15 bits of the number of backlight cycles in a * complete cycle of the modulated backlight control. * * The actual value is this field multiplied by two. 
*/ -#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) -#define BLM_LEGACY_MODE (1 << 16) +#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) +#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) +#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */ /* * This is the number of cycles out of the backlight modulation cycle for which * the backlight is on. @@ -1833,6 +1850,19 @@ #define BLC_HIST_CTL 0x61260 +/* New registers for PCH-split platforms. Safe where new bits show up, the + * register layout machtes with gen4 BLC_PWM_CTL[12]. */ +#define BLC_PWM_CPU_CTL2 0x48250 +#define BLC_PWM_CPU_CTL 0x48254 + +/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is + * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ +#define BLC_PWM_PCH_CTL1 0xc8250 +#define BLM_PCH_PWM_ENABLE (1 << 30) +#define BLM_PCH_OVERRIDE_ENABLE (1 << 30) +#define BLM_PCH_POLARITY (1 << 29) +#define BLC_PWM_PCH_CTL2 0xc8254 + /* TV port control */ #define TV_CTL 0x68000 /** Enables the TV encoder */ @@ -3840,21 +3870,6 @@ #define PCH_LVDS 0xe1180 #define LVDS_DETECTED (1 << 1) -#define BLC_PWM_CPU_CTL2 0x48250 -#define PWM_ENABLE (1 << 31) -#define PWM_PIPE_A (0 << 29) -#define PWM_PIPE_B (1 << 29) -#define BLC_PWM_CPU_CTL 0x48254 - -#define BLC_PWM_PCH_CTL1 0xc8250 -#define PWM_PCH_ENABLE (1 << 31) -#define PWM_POLARITY_ACTIVE_LOW (1 << 29) -#define PWM_POLARITY_ACTIVE_HIGH (0 << 29) -#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28) -#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28) - -#define BLC_PWM_PCH_CTL2 0xc8254 - #define PCH_PP_STATUS 0xc7200 #define PCH_PP_CONTROL 0xc7204 #define PANEL_UNLOCK_REGS (0xabcd << 16) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5e760319c0f2..bad36e0b8045 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6895,9 +6895,9 @@ static void ivb_pch_pwm_override(struct drm_device *dev) * IVB has CPU eDP backlight regs too, set things up to let the * PCH regs control the backlight */ - I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE); + I915_WRITE(BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE); I915_WRITE(BLC_PWM_CPU_CTL, 0); - I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30)); + I915_WRITE(BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE | BLM_PCH_OVERRIDE_ENABLE); } void intel_modeset_init_hw(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 08eb04c787e8..a7269e6b4bf7 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -1081,16 +1081,16 @@ out: /* make sure PWM is enabled and locked to the LVDS pipe */ pwm = I915_READ(BLC_PWM_CPU_CTL2); - if (pipe == 0 && (pwm & PWM_PIPE_B)) - I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE); + if (pipe == 0 && (pwm & BLM_PIPE_B)) + I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~BLM_PWM_ENABLE); if (pipe) - pwm |= PWM_PIPE_B; + pwm |= BLM_PIPE_B; else - pwm &= ~PWM_PIPE_B; - I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE); + pwm &= ~BLM_PIPE_B; + I915_WRITE(BLC_PWM_CPU_CTL2, pwm | BLM_PWM_ENABLE); pwm = I915_READ(BLC_PWM_PCH_CTL1); - pwm |= PWM_PCH_ENABLE; + pwm |= BLM_PCH_PWM_ENABLE; I915_WRITE(BLC_PWM_PCH_CTL1, pwm); /* * Unlock registers and just -- cgit v1.2.3 From 24ded204429fa0f5501d37c63ee35c555c0b75ee Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Jun 2012 12:14:54 +0200 Subject: drm/i915: properly enable the blc controller on the right pipe On gen4+ we have a bitfield to specify from which pipe the backlight controller should take it's clock. 
For PCH split platforms we've already set these up, but only at initialization time. And without taking into account the 3rd pipe added with ivb. For gen4, we've completely ignored these. Although we do restrict lvds to the 2nd pipe, so this is only a problem on machines where we boot up with the lvds on the first pipe. So restructure the code to enable the backlight on the right pipe at modeset time. v2: For odd reasons panel_enable_backlight gets called twice in a modeset, so we can't WARN_ON in there if the backlight controller is switched on already. v3: backlight enable can also be called through dpms on, so the check in there is legit. Update the comment to reflect that. Tested-By: Kamal Mostafa Bugzilla: https://bugs.launchpad.net/bugs/954661 Cc: Carsten Emde Reviewed-by: Eugeni Dodonov Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_drv.h | 3 ++- drivers/gpu/drm/i915/intel_lvds.c | 32 ++++++-------------------------- drivers/gpu/drm/i915/intel_panel.c | 38 +++++++++++++++++++++++++++++++++++++- 3 files changed, 45 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index c35edd7ca84d..1a1fdb088dda 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -379,7 +379,8 @@ extern u32 intel_panel_get_max_backlight(struct drm_device *dev); extern u32 intel_panel_get_backlight(struct drm_device *dev); extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); extern int intel_panel_setup_backlight(struct drm_device *dev); -extern void intel_panel_enable_backlight(struct drm_device *dev); +extern void intel_panel_enable_backlight(struct drm_device *dev, + enum pipe pipe); extern void intel_panel_disable_backlight(struct drm_device *dev); extern void intel_panel_destroy_backlight(struct drm_device *dev); extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index a7269e6b4bf7..492db7740de2 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -71,6 +71,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) static void intel_lvds_enable(struct intel_lvds *intel_lvds) { struct drm_device *dev = intel_lvds->base.base.dev; + struct intel_crtc *intel_crtc = to_intel_crtc(intel_lvds->base.base.crtc); struct drm_i915_private *dev_priv = dev->dev_private; u32 ctl_reg, lvds_reg, stat_reg; @@ -107,7 +108,7 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds) if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) DRM_ERROR("timed out waiting for panel to power on\n"); - intel_panel_enable_backlight(dev); + intel_panel_enable_backlight(dev, intel_crtc->pipe); } static void intel_lvds_disable(struct intel_lvds *intel_lvds) @@ -1074,35 +1075,14 @@ bool intel_lvds_init(struct drm_device *dev) goto failed; out: + /* + * Unlock registers and just + * leave them unlocked + */ if (HAS_PCH_SPLIT(dev)) { - u32 pwm; - - pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 
1 : 0; - - /* make sure PWM is enabled and locked to the LVDS pipe */ - pwm = I915_READ(BLC_PWM_CPU_CTL2); - if (pipe == 0 && (pwm & BLM_PIPE_B)) - I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~BLM_PWM_ENABLE); - if (pipe) - pwm |= BLM_PIPE_B; - else - pwm &= ~BLM_PIPE_B; - I915_WRITE(BLC_PWM_CPU_CTL2, pwm | BLM_PWM_ENABLE); - - pwm = I915_READ(BLC_PWM_PCH_CTL1); - pwm |= BLM_PCH_PWM_ENABLE; - I915_WRITE(BLC_PWM_PCH_CTL1, pwm); - /* - * Unlock registers and just - * leave them unlocked - */ I915_WRITE(PCH_PP_CONTROL, I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); } else { - /* - * Unlock registers and just - * leave them unlocked - */ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); } diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 7180cc828f94..58c7ee7238b8 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -287,9 +287,18 @@ void intel_panel_disable_backlight(struct drm_device *dev) dev_priv->backlight_enabled = false; intel_panel_actually_set_backlight(dev, 0); + + if (INTEL_INFO(dev)->gen >= 4) { + uint32_t reg; + + reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; + + I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE); + } } -void intel_panel_enable_backlight(struct drm_device *dev) +void intel_panel_enable_backlight(struct drm_device *dev, + enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -298,6 +307,33 @@ void intel_panel_enable_backlight(struct drm_device *dev) dev_priv->backlight_enabled = true; intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); + + if (INTEL_INFO(dev)->gen >= 4) { + uint32_t reg, tmp; + + reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; + + + tmp = I915_READ(reg); + + /* Note that this can also get called through dpms changes. And + * we don't track the backlight dpms state, hence check whether + * we have to do anything first. */ + if (tmp & BLM_PWM_ENABLE) + return; + + if (dev_priv->num_pipe == 3) + tmp &= ~BLM_PIPE_SELECT_IVB; + else + tmp &= ~BLM_PIPE_SELECT; + + tmp |= BLM_PIPE(pipe); + tmp &= ~BLM_PWM_ENABLE; + + I915_WRITE(reg, tmp); + POSTING_READ(reg); + I915_WRITE(reg, tmp | BLM_PWM_ENABLE); + } } static void intel_panel_init_backlight(struct drm_device *dev) -- cgit v1.2.3 From 0b9f43a0ee7e89013a3d913ce556715fd8acb674 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 5 Jun 2012 10:07:11 +0200 Subject: drm/i915: allow pipe A for lvds on gen4 Given the havoc the missing backlight pipe select code might have caused, let's try to re-enabled pipe A support for lvds on gen4 hw. Just to see what all blows up ... Note though that commit 4add75c43f39573edc884d46b7c2b7414f01171a Author: Chris Wilson Date: Sat Dec 4 17:49:46 2010 +0000 drm/i915: Allow LVDS to be on pipe A for Ironlake+ claims that this caused tons of spurious wakeups somehow. 
More details can be found in the old revert: commit 12e8ba25ef52f19e7a42e61aecb3c1fef83b2a82 Author: Chris Wilson Date: Tue Sep 7 23:39:28 2010 +0100 Revert "drm/i915: Allow LVDS on pipe A on gen4+" Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=16307 Reviewed-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_lvds.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 492db7740de2..ab4d64792616 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -968,6 +968,8 @@ bool intel_lvds_init(struct drm_device *dev) intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); if (HAS_PCH_SPLIT(dev)) intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + else if (IS_GEN4(dev)) + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); else intel_encoder->crtc_mask = (1 << 1); -- cgit v1.2.3 From 9b990de76ced807b2d92a601fb7f03863b632c58 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 7 Jun 2012 15:55:56 +0200 Subject: agp/intel-gtt: remove dead code This is a leftover from the conversion of the i81x fake agp driver over to the new intel-gtt code layoute. Reviewed-by: Jani Nikula Signed-Off-by: Daniel Vetter --- drivers/char/agp/intel-gtt.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 1237e7575c3f..53c4c7fca10b 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1556,9 +1556,6 @@ int intel_gmch_probe(struct pci_dev *pdev, pci_set_consistent_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)); - /*if (bridge->driver == &intel_810_driver) - return 1;*/ - if (intel_gtt_init() != 0) return 0; -- cgit v1.2.3 From dd2757f8b557ab2030154896eac9b2285557dda6 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 7 Jun 2012 15:55:57 +0200 Subject: drm/i915: stop using dev->agp->base For that to work we need to export the base address of the gtt mmio window from intel-gtt. Also replace all other uses of dev->agp by values we already have at hand. 
Reviewed-by: Jani Nikula Signed-off-by: Daniel Vetter --- drivers/char/agp/intel-gtt.c | 5 ++--- drivers/gpu/drm/i915/i915_dma.c | 21 +++++++++++++-------- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_gem_debug.c | 3 ++- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_fb.c | 4 +++- drivers/gpu/drm/i915/intel_ringbuffer.c | 6 ++++-- include/drm/intel-gtt.h | 2 ++ 9 files changed, 29 insertions(+), 17 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 53c4c7fca10b..2aab0a03ee42 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -66,7 +66,6 @@ static struct _intel_private { struct pci_dev *bridge_dev; u8 __iomem *registers; phys_addr_t gtt_bus_addr; - phys_addr_t gma_bus_addr; u32 PGETBL_save; u32 __iomem *gtt; /* I915G */ bool clear_fake_agp; /* on first access via agp, fill with scratch */ @@ -779,7 +778,7 @@ static bool intel_enable_gtt(void) pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &gma_addr); - intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); + intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); if (INTEL_GTT_GEN >= 6) return true; @@ -860,7 +859,7 @@ static int intel_fake_agp_configure(void) return -EIO; intel_private.clear_fake_agp = true; - agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; + agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr; return 0; } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 97a5a5857f5b..c639d431ad66 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1085,8 +1085,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data, ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); - dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr, - 4096); + dev_priv->dri1.gfx_hws_cpu_addr = + ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096); if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { i915_dma_cleanup(dev); ring->status_page.gfx_addr = 0; @@ -1482,15 +1482,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) } aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; + dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr; dev_priv->mm.gtt_mapping = - io_mapping_create_wc(dev->agp->base, aperture_size); + io_mapping_create_wc(dev_priv->mm.gtt_base_addr, + aperture_size); if (dev_priv->mm.gtt_mapping == NULL) { ret = -EIO; goto out_rmmap; } - i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size); + i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr, + aperture_size); /* The i915 workqueue is primarily used for batched retirement of * requests (and thus managing bo) once the task has been completed @@ -1602,8 +1605,9 @@ out_gem_unload: destroy_workqueue(dev_priv->wq); out_mtrrfree: if (dev_priv->mm.gtt_mtrr >= 0) { - mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, - dev->agp->agp_info.aper_size * 1024 * 1024); + mtrr_del(dev_priv->mm.gtt_mtrr, + dev_priv->mm.gtt_base_addr, + aperture_size); dev_priv->mm.gtt_mtrr = -1; } io_mapping_free(dev_priv->mm.gtt_mapping); @@ -1640,8 +1644,9 @@ int i915_driver_unload(struct drm_device *dev) io_mapping_free(dev_priv->mm.gtt_mapping); if (dev_priv->mm.gtt_mtrr >= 0) { - mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, - dev->agp->agp_info.aper_size * 1024 * 1024); + mtrr_del(dev_priv->mm.gtt_mtrr, + dev_priv->mm.gtt_base_addr, + 
dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE); dev_priv->mm.gtt_mtrr = -1; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ccabadd2b6c3..ae4129b3cb3e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -651,6 +651,7 @@ typedef struct drm_i915_private { unsigned long gtt_end; struct io_mapping *gtt_mapping; + phys_addr_t gtt_base_addr; int gtt_mtrr; /** PPGTT used for aliasing the PPGTT with the GTT */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index deaa0d4bb456..108e4c2b5ffa 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1122,7 +1122,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) obj->fault_mappable = true; - pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) + + pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) + page_offset; /* Finally, remap it using the new GTT offset */ diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index a4f6aaabca99..bddf7bed183f 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -132,7 +132,8 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) __func__, obj, obj->gtt_offset, handle, obj->size / 1024); - gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size); + gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset, + obj->base.size); if (gtt_mapping == NULL) { DRM_ERROR("failed to map GTT space\n"); return; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index bad36e0b8045..174549df5929 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6954,7 +6954,7 @@ void intel_modeset_init(struct drm_device *dev) dev->mode_config.max_width = 8192; dev->mode_config.max_height = 8192; } - dev->mode_config.fb_base = dev->agp->base; + dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr; DRM_DEBUG_KMS("%d display pipe%s available.\n", dev_priv->num_pipe, dev_priv->num_pipe > 1 ? 
"s" : ""); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index bf8690720a0c..e9f8338bd802 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -140,7 +140,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev, info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; info->fix.smem_len = size; - info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size); + info->screen_base = + ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset, + size); if (!info->screen_base) { ret = -ENOSPC; goto out_unpin; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 89a5e7f89d7a..14025ab9d4ca 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -968,6 +968,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, struct intel_ring_buffer *ring) { struct drm_i915_gem_object *obj; + struct drm_i915_private *dev_priv = dev->dev_private; int ret; ring->dev = dev; @@ -997,8 +998,9 @@ static int intel_init_ring_buffer(struct drm_device *dev, if (ret) goto err_unref; - ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset, - ring->size); + ring->virtual_start = + ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, + ring->size); if (ring->virtual_start == NULL) { DRM_ERROR("Failed to map ringbuffer.\n"); ret = -EINVAL; diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index 923afb5dcf0c..8048c005c6f6 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -19,6 +19,8 @@ const struct intel_gtt { dma_addr_t scratch_page_dma; /* for ppgtt PDE access */ u32 __iomem *gtt; + /* needed for ioremap in drm/i915 */ + phys_addr_t gma_bus_addr; } *intel_gtt_get(void); void intel_gtt_chipset_flush(void); -- cgit v1.2.3 From 7e8f6306fe155d6fc3fe99d666be95b4ed24427d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 7 Jun 2012 15:55:58 +0200 Subject: agp/intel-gtt: don't require the agp bridge on setup We only need it to fake the agp interface and don't actually use it in the driver anywhere. Hence conditionalize that. This is just a prep patch to eventually disable the fake agp driver on gen6+. Reviewed-by: Jani Nikula Signed-off-by: Daniel Vetter --- drivers/char/agp/intel-gtt.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 2aab0a03ee42..5e6c89e1d5eb 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1539,9 +1539,11 @@ int intel_gmch_probe(struct pci_dev *pdev, if (!intel_private.driver) return 0; - bridge->driver = &intel_fake_agp_driver; - bridge->dev_private_data = &intel_private; - bridge->dev = pdev; + if (bridge) { + bridge->driver = &intel_fake_agp_driver; + bridge->dev_private_data = &intel_private; + bridge->dev = pdev; + } intel_private.bridge_dev = pci_dev_get(pdev); -- cgit v1.2.3 From 14be93ddff61eb196382aeaa3ac86f4db844aeb0 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 8 Jun 2012 15:55:40 +0200 Subject: drm/i915 + agp/intel-gtt: prep work for direct setup To be able to directly set up the intel-gtt code from drm/i915 and avoid setting up the fake-agp driver we need to prepare a few things: - pass both the bridge and gpu pci_dev to the probe function and add code to handle the gpu pdev both being present (for drm/i915) and not present (fake agp). 
- add refcounting to the remove function so that unloading drm/i915 doesn't kill the fake agp driver v2: Fix up the cleanup and refcount, noticed by Jani Nikula. Reviewed-by: Jani Nikula Signed-Off-by: Daniel Vetter --- drivers/char/agp/intel-agp.c | 5 +++-- drivers/char/agp/intel-agp.h | 3 --- drivers/char/agp/intel-gtt.c | 46 +++++++++++++++++++++++++++++++++-------- drivers/gpu/drm/i915/i915_dma.c | 13 ++++++++++-- include/drm/intel-gtt.h | 4 ++++ 5 files changed, 55 insertions(+), 16 deletions(-) diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 764f70c5e690..c98c5689bb0b 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -12,6 +12,7 @@ #include #include "agp.h" #include "intel-agp.h" +#include int intel_agp_enabled; EXPORT_SYMBOL(intel_agp_enabled); @@ -747,7 +748,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, bridge->capndx = cap_ptr; - if (intel_gmch_probe(pdev, bridge)) + if (intel_gmch_probe(pdev, NULL, bridge)) goto found_gmch; for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { @@ -824,7 +825,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev) agp_remove_bridge(bridge); - intel_gmch_remove(pdev); + intel_gmch_remove(); agp_put_bridge(bridge); } diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index c0091753a0d1..cf2e764b1760 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -250,7 +250,4 @@ #define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */ #define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 -int intel_gmch_probe(struct pci_dev *pdev, - struct agp_bridge_data *bridge); -void intel_gmch_remove(struct pci_dev *pdev); #endif diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 5e6c89e1d5eb..cea9f9905c7d 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -75,6 +75,7 @@ static struct _intel_private { struct resource ifp_resource; int resource_valid; struct page *scratch_page; + int refcount; } intel_private; #define INTEL_GTT_GEN intel_private.driver->gen @@ -1522,14 +1523,32 @@ static int find_gmch(u16 device) return 1; } -int intel_gmch_probe(struct pci_dev *pdev, - struct agp_bridge_data *bridge) +int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, + struct agp_bridge_data *bridge) { int i, mask; - intel_private.driver = NULL; + + /* + * Can be called from the fake agp driver but also directly from + * drm/i915.ko. Hence we need to check whether everything is set up + * already. 
+ */ + if (intel_private.driver) { + intel_private.refcount++; + return 1; + } for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { - if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { + if (gpu_pdev) { + if (gpu_pdev->device == + intel_gtt_chipsets[i].gmch_chip_id) { + intel_private.pcidev = pci_dev_get(gpu_pdev); + intel_private.driver = + intel_gtt_chipsets[i].gtt_driver; + + break; + } + } else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { intel_private.driver = intel_gtt_chipsets[i].gtt_driver; break; @@ -1539,15 +1558,17 @@ int intel_gmch_probe(struct pci_dev *pdev, if (!intel_private.driver) return 0; + intel_private.refcount++; + if (bridge) { bridge->driver = &intel_fake_agp_driver; bridge->dev_private_data = &intel_private; - bridge->dev = pdev; + bridge->dev = bridge_pdev; } - intel_private.bridge_dev = pci_dev_get(pdev); + intel_private.bridge_dev = pci_dev_get(bridge_pdev); - dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); + dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); mask = intel_private.driver->dma_mask_size; if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) @@ -1557,8 +1578,11 @@ int intel_gmch_probe(struct pci_dev *pdev, pci_set_consistent_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)); - if (intel_gtt_init() != 0) + if (intel_gtt_init() != 0) { + intel_gmch_remove(); + return 0; + } return 1; } @@ -1577,12 +1601,16 @@ void intel_gtt_chipset_flush(void) } EXPORT_SYMBOL(intel_gtt_chipset_flush); -void intel_gmch_remove(struct pci_dev *pdev) +void intel_gmch_remove(void) { + if (--intel_private.refcount) + return; + if (intel_private.pcidev) pci_dev_put(intel_private.pcidev); if (intel_private.bridge_dev) pci_dev_put(intel_private.bridge_dev); + intel_private.driver = NULL; } EXPORT_SYMBOL(intel_gmch_remove); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index c639d431ad66..cf512e7178b4 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1474,11 +1474,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto put_bridge; } + ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); + if (!ret) { + DRM_ERROR("failed to set up gmch\n"); + ret = -EIO; + goto out_rmmap; + } + dev_priv->mm.gtt = intel_gtt_get(); if (!dev_priv->mm.gtt) { DRM_ERROR("Failed to initialize GTT\n"); ret = -ENODEV; - goto out_rmmap; + goto put_gmch; } aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; @@ -1489,7 +1496,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) aperture_size); if (dev_priv->mm.gtt_mapping == NULL) { ret = -EIO; - goto out_rmmap; + goto put_gmch; } i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr, @@ -1611,6 +1618,8 @@ out_mtrrfree: dev_priv->mm.gtt_mtrr = -1; } io_mapping_free(dev_priv->mm.gtt_mapping); +put_gmch: + intel_gmch_remove(); out_rmmap: pci_iounmap(dev->pdev, dev_priv->regs); put_bridge: diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index 8048c005c6f6..84ebd7188fc6 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -23,6 +23,10 @@ const struct intel_gtt { phys_addr_t gma_bus_addr; } *intel_gtt_get(void); +int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, + struct agp_bridge_data *bridge); +void intel_gmch_remove(void); + void intel_gtt_chipset_flush(void); void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg); void intel_gtt_clear_range(unsigned int first_entry, unsigned int 
num_entries); -- cgit v1.2.3 From 32e3cd6ecd7ae9b79605b5f2eb993186a509c239 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 7 Jun 2012 15:56:02 +0200 Subject: agp/intel-gtt: move gart base address setup We need this thing much earlier, and it doesn't make sense in the hw enabling function intel_enable_gtt - this does not change over a suspend/resume cycle ... Reviewed-by: Jani Nikula Signed-off-by: Daniel Vetter --- drivers/char/agp/intel-gtt.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index cea9f9905c7d..4387e69f8b11 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -648,6 +648,7 @@ static void intel_gtt_cleanup(void) static int intel_gtt_init(void) { + u32 gma_addr; u32 gtt_map_size; int ret; @@ -694,6 +695,15 @@ static int intel_gtt_init(void) return ret; } + if (INTEL_GTT_GEN <= 2) + pci_read_config_dword(intel_private.pcidev, I810_GMADDR, + &gma_addr); + else + pci_read_config_dword(intel_private.pcidev, I915_GMADDR, + &gma_addr); + + intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); + return 0; } @@ -769,18 +779,8 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry, static bool intel_enable_gtt(void) { - u32 gma_addr; u8 __iomem *reg; - if (INTEL_GTT_GEN <= 2) - pci_read_config_dword(intel_private.pcidev, I810_GMADDR, - &gma_addr); - else - pci_read_config_dword(intel_private.pcidev, I915_GMADDR, - &gma_addr); - - intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); - if (INTEL_GTT_GEN >= 6) return true; -- cgit v1.2.3 From 8ecd1a6615f0d9de6759aafe229bc1cc4ee99c7b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 7 Jun 2012 15:56:03 +0200 Subject: drm/i915: call intel_enable_gtt When drm/i915 is in control of the gtt, we need to call the enable function at all the relevant places ourselves.
Reviewed-by: Jani Nikula Signed-off-by: Daniel Vetter --- drivers/char/agp/intel-gtt.c | 3 ++- drivers/gpu/drm/i915/i915_gem.c | 3 +++ include/drm/intel-gtt.h | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 4387e69f8b11..419a25eeefd8 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -777,7 +777,7 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry, writel(addr | pte_flags, intel_private.gtt + entry); } -static bool intel_enable_gtt(void) +bool intel_enable_gtt(void) { u8 __iomem *reg; @@ -823,6 +823,7 @@ static bool intel_enable_gtt(void) return true; } +EXPORT_SYMBOL(intel_enable_gtt); static int i830_setup(void) { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 108e4c2b5ffa..2884b0865473 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3689,6 +3689,9 @@ i915_gem_init_hw(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; int ret; + if (!intel_enable_gtt()) + return -EIO; + i915_gem_l3_remap(dev); i915_gem_init_swizzling(dev); diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index 84ebd7188fc6..8e29d551bb3c 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -27,6 +27,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, struct agp_bridge_data *bridge); void intel_gmch_remove(void); +bool intel_enable_gtt(void); + void intel_gtt_chipset_flush(void); void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg); void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); -- cgit v1.2.3 From e188719a2891f01b3100dca4ae3a055fb5a7ab52 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 12 Jun 2012 11:28:17 +0200 Subject: drm/i915: kick any firmware framebuffers before claiming the gtt Especially vesafb likes to map everything as uc- (yikes), and if that mapping hangs around still while we try to map the gtt as wc the kernel will downgrade our request to uc-, resulting in abyssal performance. Unfortunately we can't do this as early as readon does (i.e. as the first thing we do when initializing the hw) because our fb/mmio space region moves around on a per-gen basis. So I've had to move it below the gtt initialization, but that seems to work, too. The important thing is that we do this before we set up the gtt wc mapping. Now an altogether different question is why people compile their kernels with vesafb enabled, but I guess making things just work isn't bad per se ... v2: - s/radeondrmfb/inteldrmfb/ - fix up error handling v3: Kill #ifdef X86, this is Intel after all. Noticed by Ben Widawsky. v4: Jani Nikula complained about the pointless bool primary initialization. v5: Don't oops if we can't allocate, noticed by Chris Wilson. v6: Resolve conflicts with agp rework and fixup whitespace. 
Reported-and-tested-by: "Kilarski, Bernard R" Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 57 +++++++++++++++++++++++++++++------------ 1 file changed, 40 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index cf512e7178b4..fa8f2699344b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1404,6 +1404,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base, } } +static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +{ + struct apertures_struct *ap; + struct pci_dev *pdev = dev_priv->dev->pdev; + bool primary; + + ap = alloc_apertures(1); + if (!ap) + return; + + ap->ranges[0].base = dev_priv->dev->agp->base; + ap->ranges[0].size = + dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; + primary = + pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; + + remove_conflicting_framebuffers(ap, "inteldrmfb", primary); + + kfree(ap); +} + /** * i915_driver_load - setup chip and create an initial config * @dev: DRM device @@ -1449,6 +1470,22 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto free_priv; } + ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); + if (!ret) { + DRM_ERROR("failed to set up gmch\n"); + ret = -EIO; + goto put_bridge; + } + + dev_priv->mm.gtt = intel_gtt_get(); + if (!dev_priv->mm.gtt) { + DRM_ERROR("Failed to initialize GTT\n"); + ret = -ENODEV; + goto put_gmch; + } + + i915_kick_out_firmware_fb(dev_priv); + pci_set_master(dev->pdev); /* overlay on gen2 is broken and can't address above 1G */ @@ -1471,20 +1508,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (!dev_priv->regs) { DRM_ERROR("failed to map registers\n"); ret = -EIO; - goto put_bridge; - } - - ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); - if (!ret) { - DRM_ERROR("failed to set up gmch\n"); - ret = -EIO; - goto out_rmmap; - } - - dev_priv->mm.gtt = intel_gtt_get(); - if (!dev_priv->mm.gtt) { - DRM_ERROR("Failed to initialize GTT\n"); - ret = -ENODEV; goto put_gmch; } @@ -1496,7 +1519,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) aperture_size); if (dev_priv->mm.gtt_mapping == NULL) { ret = -EIO; - goto put_gmch; + goto out_rmmap; } i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr, @@ -1618,10 +1641,10 @@ out_mtrrfree: dev_priv->mm.gtt_mtrr = -1; } io_mapping_free(dev_priv->mm.gtt_mapping); -put_gmch: - intel_gmch_remove(); out_rmmap: pci_iounmap(dev->pdev, dev_priv->regs); +put_gmch: + intel_gmch_remove(); put_bridge: pci_dev_put(dev_priv->bridge_dev); free_priv: -- cgit v1.2.3 From 93314b5b6f343f5a4c14c89d000007a754190c9a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 13 Jun 2012 17:36:55 +0100 Subject: drm/i915: Switch off FBC when disabling the primary plane when obscured As we switch on/off the primary plane if it is completely obscured by an overlapping video sprite, we also nee to make sure that we update the FBC configuration at the same time. v2: Not all crtcs are intel_crtcs, as spotted by Daniel. v3: Boot testing rules. 
References: https://bugs.freedesktop.org/show_bug.cgi?id=50238 Signed-off-by: Chris Wilson Cc: Jesse Barnes Reviewed-by: Jesse Barnes Reviewed-by: Jani Nikula Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_drv.h | 2 +- drivers/gpu/drm/i915/intel_pm.c | 4 +++- drivers/gpu/drm/i915/intel_sprite.c | 25 +++++++++++++++---------- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1a1fdb088dda..5290e9df327b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -169,6 +169,7 @@ struct intel_crtc { u8 lut_r[256], lut_g[256], lut_b[256]; int dpms_mode; bool active; /* is the crtc on? independent of the dpms mode */ + bool primary_disabled; /* is the crtc obscured by a plane? */ bool busy; /* is scanout buffer being updated frequently? */ struct timer_list idle_timer; bool lowfreq_avail; @@ -191,7 +192,6 @@ struct intel_plane { struct drm_plane base; enum pipe pipe; struct drm_i915_gem_object *obj; - bool primary_disabled; int max_downscale; u32 lut_r[1024], lut_g[1024], lut_b[1024]; void (*update_plane)(struct drm_plane *plane, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d0ce2a5b1d3f..b7de5ea62aa4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -405,7 +405,9 @@ void intel_update_fbc(struct drm_device *dev) * - going to an unsupported config (interlace, pixel multiply, etc.) */ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { - if (tmp_crtc->enabled && tmp_crtc->fb) { + if (tmp_crtc->enabled && + !to_intel_crtc(tmp_crtc)->primary_disabled && + tmp_crtc->fb) { if (crtc) { DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 2a20fb0781d7..9d7777bc1545 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -326,6 +326,12 @@ intel_enable_primary(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int reg = DSPCNTR(intel_crtc->plane); + if (!intel_crtc->primary_disabled) + return; + + intel_crtc->primary_disabled = false; + intel_update_fbc(dev); + I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE); } @@ -337,7 +343,13 @@ intel_disable_primary(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int reg = DSPCNTR(intel_crtc->plane); + if (intel_crtc->primary_disabled) + return; + I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE); + + intel_crtc->primary_disabled = true; + intel_update_fbc(dev); } static int @@ -485,18 +497,14 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, * Be sure to re-enable the primary before the sprite is no longer * covering it fully. 
*/ - if (!disable_primary && intel_plane->primary_disabled) { + if (!disable_primary) intel_enable_primary(crtc); - intel_plane->primary_disabled = false; - } intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y, crtc_w, crtc_h, x, y, src_w, src_h); - if (disable_primary) { + if (disable_primary) intel_disable_primary(crtc); - intel_plane->primary_disabled = true; - } /* Unpin old obj after new one is active to avoid ugliness */ if (old_obj) { @@ -527,11 +535,8 @@ intel_disable_plane(struct drm_plane *plane) struct intel_plane *intel_plane = to_intel_plane(plane); int ret = 0; - if (intel_plane->primary_disabled) { + if (plane->crtc) intel_enable_primary(plane->crtc); - intel_plane->primary_disabled = false; - } - intel_plane->disable_plane(plane); if (!intel_plane->obj) -- cgit v1.2.3 From 14d94a3d82ab3ef6b3a9f881e134d5b48323b202 Mon Sep 17 00:00:00 2001 From: Seth Forshee Date: Wed, 13 Jun 2012 13:46:58 -0500 Subject: drm/i915: ignore pipe select bit when checking for LVDS register initialization The Lenovo Thinkpad T410 has the LVDS_PIPEB_SELECT bit set in the LVDS register when booted with the lid closed, even though the LVDS hasn't really been initialized. Ignore this bit so that the VBT value will be used instead. Signed-off-by: Seth Forshee Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 174549df5929..308e1a2967e7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -434,7 +434,7 @@ static bool is_dual_link_lvds(struct drm_i915_private *dev_priv, * register is uninitialized. */ val = I915_READ(reg); - if (!(val & ~LVDS_DETECTED)) + if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED))) val = dev_priv->bios_lvds_val; dev_priv->lvds_val = val; } -- cgit v1.2.3 From fe1cc68fcb11ca14f420068d1806eb5e719ac772 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Jun 2012 14:42:41 -0700 Subject: drm/i915: CXT_SIZE register offsets added The GPUs can have different default context layouts, and the sizes could vary based on platform or BIOS. In order to back the context object with a properly sized BO, we must read this register in order to find out a sufficient size. Thankfully (sarcarm!), the register moves and changes meanings throughout generations. CTX and CXT differences are intentional as that is how it is in the documentation (prior to GEN6 it was CXT). 
Signed-off-by: Ben Widawsky --- drivers/gpu/drm/i915/i915_reg.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c4b1a2b45bb4..bee101208195 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1454,6 +1454,27 @@ */ #define CCID 0x2180 #define CCID_EN (1<<0) +#define CXT_SIZE 0x21a0 +#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f) +#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f) +#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f) +#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f) +#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f) +#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \ + GEN6_CXT_RING_SIZE(cxt_reg) + \ + GEN6_CXT_RENDER_SIZE(cxt_reg) + \ + GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ + GEN6_CXT_PIPELINE_SIZE(cxt_reg)) +#define GEN7_CTX_SIZE 0x21a8 +#define GEN7_CTX_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f) +#define GEN7_CTX_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) +#define GEN7_CTX_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) +#define GEN7_CTX_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) +#define GEN7_CTX_TOTAL_SIZE(ctx_reg) (GEN7_CTX_RENDER_SIZE(ctx_reg) + \ + GEN7_CTX_EXTENDED_SIZE(ctx_reg) + \ + GEN7_CTX_GT1_SIZE(ctx_reg) + \ + GEN7_CTX_VFSTATE_SIZE(ctx_reg)) + /* * Overlay regs */ -- cgit v1.2.3 From 254f965c39e3918544395f4ebac8c589d890bae6 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Jun 2012 14:42:42 -0700 Subject: drm/i915: preliminary context support Very basic code for context setup/destruction in the driver. Adds the file i915_gem_context.c This file implements HW context support. On gen5+ a HW context consists of an opaque GPU object which is referenced at times of context saves and restores. With RC6 enabled, the context is also referenced as the GPU enters and exists from RC6 (GPU has it's own internal power context, except on gen5). Though something like a context does exist for the media ring, the code only supports contexts for the render ring. In software, there is a distinction between contexts created by the user, and the default HW context. The default HW context is used by GPU clients that do not request setup of their own hardware context. The default context's state is never restored to help prevent programming errors. This would happen if a client ran and piggy-backed off another clients GPU state. The default context only exists to give the GPU some offset to load as the current to invoke a save of the context we actually care about. In fact, the code could likely be constructed, albeit in a more complicated fashion, to never use the default context, though that limits the driver's ability to swap out, and/or destroy other contexts. All other contexts are created as a request by the GPU client. These contexts store GPU state, and thus allow GPU clients to not re-emit state (and potentially query certain state) at any time. The kernel driver makes certain that the appropriate commands are inserted. There are 4 entry points into the contexts, init, fini, open, close. The names are self-explanatory except that init can be called during reset, and also during pm thaw/resume. As we expect our context to be preserved across these events, we do not reinitialize in this case. As Adam Jackson pointed out, The cutoff of 1MB where a HW context is considered too big is arbitrary. 
The reason for this is even though context sizes are increasing with every generation, they have yet to eclipse even 32k. If we somehow read back way more than that, it probably means BIOS has done something strange, or we're running on a platform that wasn't designed for this. v2: rename load/unload to init/fini (daniel) remove ILK support for get_size() (indirectly daniel) add HAS_HW_CONTEXTS macro to clarify supported platforms (daniel) added comments (Ben) Signed-off-by: Ben Widawsky --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/i915_dma.c | 4 + drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 8 ++ drivers/gpu/drm/i915/i915_gem.c | 5 + drivers/gpu/drm/i915/i915_gem_context.c | 175 ++++++++++++++++++++++++++++++++ 6 files changed, 194 insertions(+) create mode 100644 drivers/gpu/drm/i915/i915_gem_context.c diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 2e9268da58d8..b0bacdba6d7e 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -7,6 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ i915_debugfs.o \ i915_suspend.o \ i915_gem.o \ + i915_gem_context.o \ i915_gem_debug.o \ i915_gem_evict.o \ i915_gem_execbuffer.o \ diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index fa8f2699344b..3f3aca8c3767 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1669,6 +1669,7 @@ int i915_driver_unload(struct drm_device *dev) if (ret) DRM_ERROR("failed to idle hardware: %d\n", ret); i915_gem_retire_requests(dev); + i915_gem_context_fini(dev); mutex_unlock(&dev->struct_mutex); /* Cancel the retire work handler, which should be idle now. */ @@ -1758,6 +1759,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file) spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); + i915_gem_context_open(dev, file); + return 0; } @@ -1790,6 +1793,7 @@ void i915_driver_lastclose(struct drm_device * dev) void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) { + i915_gem_context_close(dev, file_priv); i915_gem_release(dev, file_priv); } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 238a52165833..5d9e07ece3c2 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -903,6 +903,7 @@ int i915_reset(struct drm_device *dev) for_each_ring(ring, dev_priv, i) ring->init(ring); + i915_gem_context_init(dev); i915_gem_init_ppgtt(dev); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ae4129b3cb3e..8d02951b988b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -821,6 +821,8 @@ typedef struct drm_i915_private { struct drm_property *force_audio_property; struct work_struct parity_error_work; + bool hw_contexts_disabled; + uint32_t hw_context_size; } drm_i915_private_t; /* Iterate over initialised rings */ @@ -1075,6 +1077,7 @@ struct drm_i915_file_private { #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) +#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6) #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) @@ -1363,6 +1366,11 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, struct dma_buf *i915_gem_prime_export(struct drm_device *dev, struct drm_gem_object *gem_obj, int flags); +/* 
i915_gem_context.c */ +void i915_gem_context_init(struct drm_device *dev); +void i915_gem_context_fini(struct drm_device *dev); +void i915_gem_context_open(struct drm_device *dev, struct drm_file *file); +void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); /* i915_gem_gtt.c */ int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2884b0865473..c06f50d44a23 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3714,6 +3714,11 @@ i915_gem_init_hw(struct drm_device *dev) dev_priv->next_seqno = 1; + /* + * XXX: There was some w/a described somewhere suggesting loading + * contexts before PPGTT. + */ + i915_gem_context_init(dev); i915_gem_init_ppgtt(dev); return 0; diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c new file mode 100644 index 000000000000..e39808e349f1 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -0,0 +1,175 @@ +/* + * Copyright © 2011-2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Ben Widawsky + * + */ + +/* + * This file implements HW context support. On gen5+ a HW context consists of an + * opaque GPU object which is referenced at times of context saves and restores. + * With RC6 enabled, the context is also referenced as the GPU enters and exists + * from RC6 (GPU has it's own internal power context, except on gen5). Though + * something like a context does exist for the media ring, the code only + * supports contexts for the render ring. + * + * In software, there is a distinction between contexts created by the user, + * and the default HW context. The default HW context is used by GPU clients + * that do not request setup of their own hardware context. The default + * context's state is never restored to help prevent programming errors. This + * would happen if a client ran and piggy-backed off another clients GPU state. + * The default context only exists to give the GPU some offset to load as the + * current to invoke a save of the context we actually care about. In fact, the + * code could likely be constructed, albeit in a more complicated fashion, to + * never use the default context, though that limits the driver's ability to + * swap out, and/or destroy other contexts. + * + * All other contexts are created as a request by the GPU client. 
These contexts + * store GPU state, and thus allow GPU clients to not re-emit state (and + * potentially query certain state) at any time. The kernel driver makes + * certain that the appropriate commands are inserted. + * + * The context life cycle is semi-complicated in that context BOs may live + * longer than the context itself because of the way the hardware, and object + * tracking works. Below is a very crude representation of the state machine + * describing the context life. + * refcount pincount active + * S0: initial state 0 0 0 + * S1: context created 1 0 0 + * S2: context is currently running 2 1 X + * S3: GPU referenced, but not current 2 0 1 + * S4: context is current, but destroyed 1 1 0 + * S5: like S3, but destroyed 1 0 1 + * + * The most common (but not all) transitions: + * S0->S1: client creates a context + * S1->S2: client submits execbuf with context + * S2->S3: other clients submits execbuf with context + * S3->S1: context object was retired + * S3->S2: clients submits another execbuf + * S2->S4: context destroy called with current context + * S3->S5->S0: destroy path + * S4->S5->S0: destroy path on current context + * + * There are two confusing terms used above: + * The "current context" means the context which is currently running on the + * GPU. The GPU has loaded it's state already and has stored away the gtt + * offset of the BO. The GPU is not actively referencing the data at this + * offset, but it will on the next context switch. The only way to avoid this + * is to do a GPU reset. + * + * An "active context' is one which was previously the "current context" and is + * on the active list waiting for the next context switch to occur. Until this + * happens, the object must remain at the same gtt offset. It is therefore + * possible to destroy a context, but it is still active. + * + */ + +#include "drmP.h" +#include "i915_drm.h" +#include "i915_drv.h" + +static int get_context_size(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + u32 reg; + + switch (INTEL_INFO(dev)->gen) { + case 6: + reg = I915_READ(CXT_SIZE); + ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; + break; + case 7: + reg = I915_READ(GEN7_CTX_SIZE); + ret = GEN7_CTX_TOTAL_SIZE(reg) * 64; + break; + default: + BUG(); + } + + return ret; +} + +/** + * The default context needs to exist per ring that uses contexts. It stores the + * context state of the GPU for applications that don't utilize HW contexts, as + * well as an idle case. + */ +static int create_default_context(struct drm_i915_private *dev_priv) +{ + return 0; +} + +void i915_gem_context_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t ctx_size; + + if (!HAS_HW_CONTEXTS(dev)) + return; + + /* If called from reset, or thaw... 
we've been here already */ + if (dev_priv->hw_contexts_disabled) + return; + + ctx_size = get_context_size(dev); + dev_priv->hw_context_size = get_context_size(dev); + dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096); + + if (ctx_size <= 0 || ctx_size > (1<<20)) { + dev_priv->hw_contexts_disabled = true; + return; + } + + if (create_default_context(dev_priv)) { + dev_priv->hw_contexts_disabled = true; + return; + } + + DRM_DEBUG_DRIVER("HW context support initialized\n"); +} + +void i915_gem_context_fini(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->hw_contexts_disabled) + return; +} + +void i915_gem_context_open(struct drm_device *dev, struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->hw_contexts_disabled) + return; +} + +void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->hw_contexts_disabled) + return; +} -- cgit v1.2.3 From 40521054fd46f94e0368cead312d56e9e442aaab Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Jun 2012 14:42:43 -0700 Subject: drm/i915: context basic create & destroy Invent an abstraction for a hw context which is passed around through the core functions. The main bit a hw context holds is the buffer object which backs the context. The rest of the members are just helper functions. Specifically the ring member, which could likely go away if we decide to never implement whatever other hw context support exists. Of note here is the introduction of the 64k alignment constraint for the BO. If contexts become heavily used, we should consider tweaking this down to 4k. Until the contexts are merged and tested a bit though, I think 64k is a nice start (based on docs). Since we don't yet switch contexts, there is really not much complexity here. Creation/destruction works pretty much as one would expect. An idr is used to generate the context id numbers which are unique per file descriptor. v2: add DRM_DEBUG_DRIVERS to distinguish ENOMEM failures (ben) convert a BUG_ON to WARN_ON, default destruction is still fatal (ben) Signed-off-by: Ben Widawsky --- drivers/gpu/drm/i915/i915_drv.h | 11 +++ drivers/gpu/drm/i915/i915_gem_context.c | 142 +++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/intel_ringbuffer.h | 2 + 3 files changed, 153 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8d02951b988b..250eeae7c262 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -308,6 +308,16 @@ struct i915_hw_ppgtt { dma_addr_t scratch_page_dma_addr; }; + +/* This must match up with the value previously used for execbuf2.rsvd1. 
*/ +#define DEFAULT_CONTEXT_ID 0 +struct i915_hw_context { + int id; + struct drm_i915_file_private *file_priv; + struct intel_ring_buffer *ring; + struct drm_i915_gem_object *obj; +}; + enum no_fbc_reason { FBC_NO_OUTPUT, /* no outputs enabled to compress */ FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ @@ -1032,6 +1042,7 @@ struct drm_i915_file_private { struct spinlock lock; struct list_head request_list; } mm; + struct idr context_idr; }; #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index e39808e349f1..2aca00235ce3 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -89,6 +89,15 @@ #include "i915_drm.h" #include "i915_drv.h" +/* This is a HW constraint. The value below is the largest known requirement + * I've seen in a spec to date, and that was a workaround for a non-shipping + * part. It should be safe to decrease this, but it's more future proof as is. + */ +#define CONTEXT_ALIGN (64<<10) + +static struct i915_hw_context * +i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); + static int get_context_size(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -111,6 +120,76 @@ static int get_context_size(struct drm_device *dev) return ret; } +static void do_destroy(struct i915_hw_context *ctx) +{ + struct drm_device *dev = ctx->obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (ctx->file_priv) + idr_remove(&ctx->file_priv->context_idr, ctx->id); + else + BUG_ON(ctx != dev_priv->ring[RCS].default_context); + + drm_gem_object_unreference(&ctx->obj->base); + kfree(ctx); +} + +static int +create_hw_context(struct drm_device *dev, + struct drm_i915_file_private *file_priv, + struct i915_hw_context **ctx_out) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret, id; + + *ctx_out = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL); + if (*ctx_out == NULL) + return -ENOMEM; + + (*ctx_out)->obj = i915_gem_alloc_object(dev, + dev_priv->hw_context_size); + if ((*ctx_out)->obj == NULL) { + kfree(*ctx_out); + DRM_DEBUG_DRIVER("Context object allocated failed\n"); + return -ENOMEM; + } + + /* The ring associated with the context object is handled by the normal + * object tracking code. We give an initial ring value simple to pass an + * assertion in the context switch code. + */ + (*ctx_out)->ring = &dev_priv->ring[RCS]; + + /* Default context will never have a file_priv */ + if (file_priv == NULL) + return 0; + + (*ctx_out)->file_priv = file_priv; + +again: + if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) { + ret = -ENOMEM; + DRM_DEBUG_DRIVER("idr allocation failed\n"); + goto err_out; + } + + ret = idr_get_new_above(&file_priv->context_idr, *ctx_out, + DEFAULT_CONTEXT_ID + 1, &id); + if (ret == 0) + (*ctx_out)->id = id; + + if (ret == -EAGAIN) + goto again; + else if (ret) + goto err_out; + + return 0; + +err_out: + do_destroy(*ctx_out); + return ret; +} + /** * The default context needs to exist per ring that uses contexts. 
It stores the * context state of the GPU for applications that don't utilize HW contexts, as @@ -118,7 +197,30 @@ static int get_context_size(struct drm_device *dev) */ static int create_default_context(struct drm_i915_private *dev_priv) { - return 0; + struct i915_hw_context *ctx; + int ret; + + BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); + + ret = create_hw_context(dev_priv->dev, NULL, + &dev_priv->ring[RCS].default_context); + if (ret) + return ret; + + /* We may need to do things with the shrinker which require us to + * immediately switch back to the default context. This can cause a + * problem as pinning the default context also requires GTT space which + * may not be available. To avoid this we always pin the + * default context. + */ + ctx = dev_priv->ring[RCS].default_context; + ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); + if (ret) { + do_destroy(ctx); + return ret; + } + + return ret; } void i915_gem_context_init(struct drm_device *dev) @@ -130,7 +232,8 @@ void i915_gem_context_init(struct drm_device *dev) return; /* If called from reset, or thaw... we've been here already */ - if (dev_priv->hw_contexts_disabled) + if (dev_priv->hw_contexts_disabled || + dev_priv->ring[RCS].default_context) return; ctx_size = get_context_size(dev); @@ -156,20 +259,55 @@ void i915_gem_context_fini(struct drm_device *dev) if (dev_priv->hw_contexts_disabled) return; + + i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj); + + do_destroy(dev_priv->ring[RCS].default_context); } void i915_gem_context_open(struct drm_device *dev, struct drm_file *file) { struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_file_private *file_priv = file->driver_priv; if (dev_priv->hw_contexts_disabled) return; + + idr_init(&file_priv->context_idr); +} + +static int context_idr_cleanup(int id, void *p, void *data) +{ + struct drm_file *file = (struct drm_file *)data; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_hw_context *ctx; + + BUG_ON(id == DEFAULT_CONTEXT_ID); + ctx = i915_gem_context_get(file_priv, id); + if (WARN_ON(ctx == NULL)) + return -ENXIO; + + do_destroy(ctx); + + return 0; } void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) { struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_file_private *file_priv = file->driver_priv; if (dev_priv->hw_contexts_disabled) return; + + mutex_lock(&dev->struct_mutex); + idr_for_each(&file_priv->context_idr, context_idr_cleanup, file); + idr_destroy(&file_priv->context_idr); + mutex_unlock(&dev->struct_mutex); +} + +static __used struct i915_hw_context * +i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) +{ + return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 55d3da26bae7..bb19becb1163 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -116,6 +116,8 @@ struct intel_ring_buffer { wait_queue_head_t irq_queue; + struct i915_hw_context *default_context; + void *private; }; -- cgit v1.2.3 From e055684168af48ac7deb27d7267046a0fb9ef80e Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Jun 2012 14:42:46 -0700 Subject: drm/i915: context switch implementation Implement the context switch code as well as the interfaces to do the context switch. This patch also doesn't match 1:1 with the RFC patches. 
The main difference is that from Daniel's responses the last context object is now stored instead of the last context. This aids in allows us to free the context data structure, and context object independently. There is room for optimization: this code will pin the context object until the next context is active. The optimal way to do it is to actually pin the object, move it to the active list, do the context switch, and then unpin it. This allows the eviction code to actually evict the context object if needed. The context switch code is missing workarounds, they will be implemented in future patches. v2: actually do obj->dirty=1 in switch (daniel) Modified comment around above Remove flags to context switch (daniel) Move mi_set_context code to i915_gem_context.c (daniel) Remove seqno , use lazy request instead (daniel) v3: use i915_gem_request_next_seqno instead of outstanding_lazy_request (Daniel) remove id's from trace events (Daniel) Put the context BO in the instruction domain (Daniel) Don't unref the BO is context switch fails (Chris) Signed-off-by: Ben Widawsky --- drivers/gpu/drm/i915/i915_drv.h | 3 + drivers/gpu/drm/i915/i915_gem_context.c | 146 +++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/intel_ringbuffer.h | 1 + 3 files changed, 149 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 250eeae7c262..7fb0364d5d61 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -313,6 +313,7 @@ struct i915_hw_ppgtt { #define DEFAULT_CONTEXT_ID 0 struct i915_hw_context { int id; + bool is_initialized; struct drm_i915_file_private *file_priv; struct intel_ring_buffer *ring; struct drm_i915_gem_object *obj; @@ -1382,6 +1383,8 @@ void i915_gem_context_init(struct drm_device *dev); void i915_gem_context_fini(struct drm_device *dev); void i915_gem_context_open(struct drm_device *dev, struct drm_file *file); void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); +int i915_switch_context(struct intel_ring_buffer *ring, + struct drm_file *file, int to_id); /* i915_gem_gtt.c */ int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 2aca00235ce3..5248c00707f6 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -190,6 +190,11 @@ err_out: return ret; } +static inline bool is_default_context(struct i915_hw_context *ctx) +{ + return (ctx == ctx->ring->default_context); +} + /** * The default context needs to exist per ring that uses contexts. 
It stores the * context state of the GPU for applications that don't utilize HW contexts, as @@ -306,8 +311,147 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) mutex_unlock(&dev->struct_mutex); } -static __used struct i915_hw_context * +static struct i915_hw_context * i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) { return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id); } + +static inline int +mi_set_context(struct intel_ring_buffer *ring, + struct i915_hw_context *new_context, + u32 hw_flags) +{ + int ret; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_SET_CONTEXT); + intel_ring_emit(ring, new_context->obj->gtt_offset | + MI_MM_SPACE_GTT | + MI_SAVE_EXT_STATE_EN | + MI_RESTORE_EXT_STATE_EN | + hw_flags); + /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */ + intel_ring_emit(ring, MI_NOOP); + + intel_ring_advance(ring); + + return ret; +} + +static int do_switch(struct drm_i915_gem_object *from_obj, + struct i915_hw_context *to, + u32 seqno) +{ + struct intel_ring_buffer *ring = NULL; + u32 hw_flags = 0; + int ret; + + BUG_ON(to == NULL); + BUG_ON(from_obj != NULL && from_obj->pin_count == 0); + + ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false); + if (ret) + return ret; + + if (!to->is_initialized || is_default_context(to)) + hw_flags |= MI_RESTORE_INHIBIT; + else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */ + hw_flags |= MI_FORCE_RESTORE; + + ring = to->ring; + ret = mi_set_context(ring, to, hw_flags); + if (ret) { + i915_gem_object_unpin(to->obj); + return ret; + } + + /* The backing object for the context is done after switching to the + * *next* context. Therefore we cannot retire the previous context until + * the next context has already started running. In fact, the below code + * is a bit suboptimal because the retiring can occur simply after the + * MI_SET_CONTEXT instead of when the next seqno has completed. + */ + if (from_obj != NULL) { + from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; + i915_gem_object_move_to_active(from_obj, ring, seqno); + /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the + * whole damn pipeline, we don't need to explicitly mark the + * object dirty. The only exception is that the context must be + * correct in case the object gets swapped out. Ideally we'd be + * able to defer doing this until we know the object would be + * swapped, but there is no way to do that yet. + */ + from_obj->dirty = 1; + BUG_ON(from_obj->ring != to->ring); + i915_gem_object_unpin(from_obj); + } + + ring->last_context_obj = to->obj; + to->is_initialized = true; + + return 0; +} + +/** + * i915_switch_context() - perform a GPU context switch. + * @ring: ring for which we'll execute the context switch + * @file_priv: file_priv associated with the context, may be NULL + * @id: context id number + * @seqno: sequence number by which the new context will be switched to + * @flags: + * + * The context life cycle is simple. The context refcount is incremented and + * decremented by 1 and create and destroy. If the context is in use by the GPU, + * it will have a refoucnt > 1. This allows us to destroy the context abstract + * object while letting the normal object tracking destroy the backing BO. 
+ */ +int i915_switch_context(struct intel_ring_buffer *ring, + struct drm_file *file, + int to_id) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_file_private *file_priv = NULL; + struct i915_hw_context *to; + struct drm_i915_gem_object *from_obj = ring->last_context_obj; + int ret; + + if (dev_priv->hw_contexts_disabled) + return 0; + + if (ring != &dev_priv->ring[RCS]) + return 0; + + if (file) + file_priv = file->driver_priv; + + if (to_id == DEFAULT_CONTEXT_ID) { + to = ring->default_context; + } else { + to = i915_gem_context_get(file_priv, to_id); + if (to == NULL) + return -EINVAL; + } + + if (from_obj == to->obj) + return 0; + + ret = do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring)); + if (ret) + return ret; + + /* Just to make the code a little cleaner we take the object reference + * after the switch was successful. It would be more intuitive to ref + * the 'to' object before the switch but we know the refcount must be >0 + * if context_get() succeeded, and we hold struct mutex. So it's safe to + * do this here/now + */ + drm_gem_object_reference(&to->obj->base); + if (from_obj != NULL) + drm_gem_object_unreference(&from_obj->base); + return ret; +} diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index bb19becb1163..b7884b90474a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -117,6 +117,7 @@ struct intel_ring_buffer { wait_queue_head_t irq_queue; struct i915_hw_context *default_context; + struct drm_i915_gem_object *last_context_obj; void *private; }; -- cgit v1.2.3 From 3af7b8572f5e375421ba6128585972d4a42a0762 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 14 Jun 2012 00:08:32 +0200 Subject: drm/i915: ensure context objects are bound to the global gtt This way round we don't introduce and ugly layering violations and use the interface as I planned to use it. Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_context.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 5248c00707f6..fb6aa5e10991 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -358,6 +358,9 @@ static int do_switch(struct drm_i915_gem_object *from_obj, if (ret) return ret; + if (!to->obj->has_global_gtt_mapping) + i915_gem_gtt_bind_object(to->obj, to->obj->cache_level); + if (!to->is_initialized || is_default_context(to)) hw_flags |= MI_RESTORE_INHIBIT; else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */ -- cgit v1.2.3 From e37ec39b180c53dea3106ceb8f247bcba47dfb82 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Jun 2012 14:42:48 -0700 Subject: drm/i915: Ivybridge MI_ARB_ON_OFF context w/a The workaround itself applies to gen7 only (according to the docs) and as Eric Anholt points out shouldn't be required since we don't use HW scheduling features, and therefore arbitration. Though since it is a small, and simple addition, and we don't really understand the issue, just do it. FWIW, I eventually want to play with some of the arbitration stuff, and I'd hate to forget about this. 
Signed-off-by: Ben Widawsky --- drivers/gpu/drm/i915/i915_gem_context.c | 12 +++++++++++- drivers/gpu/drm/i915/i915_reg.h | 3 +++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index fb6aa5e10991..d49d5fc0a592 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -324,10 +324,15 @@ mi_set_context(struct intel_ring_buffer *ring, { int ret; - ret = intel_ring_begin(ring, 4); + ret = intel_ring_begin(ring, 6); if (ret) return ret; + if (IS_GEN7(ring->dev)) + intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); + else + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_SET_CONTEXT); intel_ring_emit(ring, new_context->obj->gtt_offset | @@ -338,6 +343,11 @@ mi_set_context(struct intel_ring_buffer *ring, /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */ intel_ring_emit(ring, MI_NOOP); + if (IS_GEN7(ring->dev)) + intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); + else + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return ret; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index bee101208195..14cb714df352 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -210,6 +210,9 @@ #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) +#define MI_ARB_ON_OFF MI_INSTR(0x08, 0) +#define MI_ARB_ENABLE (1<<0) +#define MI_ARB_DISABLE (0<<0) #define MI_SET_CONTEXT MI_INSTR(0x18, 0) #define MI_MM_SPACE_GTT (1<<8) #define MI_MM_SPACE_PHYSICAL (0<<8) -- cgit v1.2.3 From cc0f6398225ffd2b890ff83eafe212b1ae863cad Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Jun 2012 14:42:49 -0700 Subject: drm/i915: PIPE_CONTROL_TLB_INVALIDATE This has showed up in several other patches. It's required for the next context workaround. I tested this one on its own and saw no differences in basic tests (performance or otherwise). This patch is relatively likely to cause regressions, hence why it's split out. Signed-off-by: Ben Widawsky --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_ringbuffer.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 14cb714df352..60562f0c7018 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -294,6 +294,7 @@ #define DISPLAY_PLANE_B (1<<20) #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) #define PIPE_CONTROL_CS_STALL (1<<20) +#define PIPE_CONTROL_TLB_INVALIDATE (1<<18) #define PIPE_CONTROL_QW_WRITE (1<<14) #define PIPE_CONTROL_DEPTH_STALL (1<<13) #define PIPE_CONTROL_WRITE_FLUSH (1<<12) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 14025ab9d4ca..a041492fdd46 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -226,6 +226,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, * impact. 
*/ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_TLB_INVALIDATE; flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; -- cgit v1.2.3 From 12b0286f49947a6cdc9285032d918466a8c3f5f9 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Jun 2012 14:42:50 -0700 Subject: drm/i915: possibly invalidate TLB before context switch From http://intellinuxgraphics.org/documentation/SNB/IHD_OS_Vol1_Part3.pdf [DevSNB] If Flush TLB invalidation Mode is enabled it's the driver's responsibility to invalidate the TLBs at least once after the previous context switch after any GTT mappings changed (including new GTT entries). This can be done by a pipelined PIPE_CONTROL with TLB inv bit set immediately before MI_SET_CONTEXT. On GEN7 the invalidation mode is explicitly set, but this appears to be lacking for GEN6. Since I don't know the history on this, I've decided to dynamically read the value at ring init time, and use that value throughout. v2: better comment (daniel) Signed-off-by: Ben Widawsky --- drivers/gpu/drm/i915/i915_gem_context.c | 11 +++++++++++ drivers/gpu/drm/i915/intel_ringbuffer.c | 7 +++++++ drivers/gpu/drm/i915/intel_ringbuffer.h | 4 ++++ 3 files changed, 22 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index d49d5fc0a592..fb1e1d22572c 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -324,6 +324,17 @@ mi_set_context(struct intel_ring_buffer *ring, { int ret; + /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB + * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value + * explicitly, so we rely on the value at ring init, stored in + * itlb_before_ctx_switch. + */ + if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) { + ret = ring->flush(ring, 0, 0); + if (ret) + return ret; + } + ret = intel_ring_begin(ring, 6); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index a041492fdd46..7a16f16371e6 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -423,6 +423,13 @@ static int init_render_ring(struct intel_ring_buffer *ring) */ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); + + /* This is not explicitly set for GEN6, so read the register. + * see intel_ring_mi_set_context() for why we care. + * TODO: consider explicitly setting the bit for GEN5 + */ + ring->itlb_before_ctx_switch = + !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS); } if (INTEL_INFO(dev)->gen >= 6) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index b7884b90474a..594c9c4ad396 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -116,6 +116,10 @@ struct intel_ring_buffer { wait_queue_head_t irq_queue; + /** + * Do an explicit TLB flush before MI_SET_CONTEXT + */ + bool itlb_before_ctx_switch; struct i915_hw_context *default_context; struct drm_i915_gem_object *last_context_obj; -- cgit v1.2.3 From dfabbcb4f6a277992037fe199bcc1bf6bd44a996 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Jun 2012 14:42:51 -0700 Subject: drm/i915: use the default context With the code to do HW context switches in place have the driver load the default context for the render ring when the driver loads. 
The default context will be an ever present context that is available to switch to at any time for the given ring. Signed-off-by: Ben Widawsky --- drivers/gpu/drm/i915/i915_gem_context.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index fb1e1d22572c..6a7b67d9f43f 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -97,6 +97,8 @@ static struct i915_hw_context * i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); +static int do_switch(struct drm_i915_gem_object *from_obj, + struct i915_hw_context *to, u32 seqno); static int get_context_size(struct drm_device *dev) { @@ -225,6 +227,14 @@ static int create_default_context(struct drm_i915_private *dev_priv) return ret; } + ret = do_switch(NULL, ctx, 0); + if (ret) { + i915_gem_object_unpin(ctx->obj); + do_destroy(ctx); + } else { + DRM_DEBUG_DRIVER("Default HW context loaded\n"); + } + return ret; } -- cgit v1.2.3 From b9a3906b609c4b546f00483089516c65fb2ff014 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Jun 2012 14:42:52 -0700 Subject: drm/i915: add ccid to error state Signed-off-by: Ben Widawsky --- drivers/gpu/drm/i915/i915_debugfs.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_irq.c | 1 + 3 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4fa00fcfbc96..2909b123baf5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -713,6 +713,7 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, "EIR: 0x%08x\n", error->eir); seq_printf(m, "IER: 0x%08x\n", error->ier); seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); + seq_printf(m, "CCID: 0x%08x\n", error->ccid); for (i = 0; i < dev_priv->num_fence_regs; i++) seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7fb0364d5d61..6edc02b6dda4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -176,6 +176,7 @@ struct drm_i915_error_state { u32 eir; u32 pgtbl_er; u32 ier; + u32 ccid; bool waiting[I915_NUM_RINGS]; u32 pipestat[I915_MAX_PIPES]; u32 tail[I915_NUM_RINGS]; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 0e876646d769..add0aaeadecd 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1156,6 +1156,7 @@ static void i915_capture_error_state(struct drm_device *dev) kref_init(&error->ref); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); + error->ccid = I915_READ(CCID); if (HAS_PCH_SPLIT(dev)) error->ier = I915_READ(DEIER) | I915_READ(GTIER); -- cgit v1.2.3 From f2ef6eb1453a95ce8ab388493793c54a6bedc405 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Jun 2012 14:42:53 -0700 Subject: drm/i915: switch to default context on idle To keep things as sane as possible, switch to the default context before idling. This should help free context objects, as well as put things in a more well defined state before suspending. 
v2: remove seqno from context switch call (daniel) return error on failed context switch instead of WARN+continue (daniel) v3: move idling to i915_gpu idle (from i915_gem_idle) (Chris) Signed-off-by: Ben Widawsky --- drivers/gpu/drm/i915/i915_gem.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c06f50d44a23..3596f3415624 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2332,6 +2332,10 @@ int i915_gpu_idle(struct drm_device *dev) /* Is the device fubar? */ if (WARN_ON(!list_empty(&ring->gpu_write_list))) return -EBUSY; + + ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID); + if (ret) + return ret; } return 0; -- cgit v1.2.3 From 846248136dd3a6723135b8515ed7dc4c52a7b2ae Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Jun 2012 14:42:54 -0700 Subject: drm/i915/context: create & destroy ioctls Add the interfaces to allow user space to create and destroy contexts. Contexts are destroyed automatically if the file descriptor for the dri device is closed. Following convention as usual here causes checkpatch warnings. v2: with is_initialized, no longer need to init at create drop the context switch on create (daniel) v3: Use interruptible lock (Chris) return -ENODEV in !GEM case (Chris) Signed-off-by: Ben Widawsky --- drivers/gpu/drm/i915/i915_dma.c | 2 ++ drivers/gpu/drm/i915/i915_drv.h | 4 +++ drivers/gpu/drm/i915/i915_gem_context.c | 55 +++++++++++++++++++++++++++++++++ include/drm/i915_drm.h | 15 +++++++++ 4 files changed, 76 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 3f3aca8c3767..ba75af12f1fd 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1848,6 +1848,8 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6edc02b6dda4..03e7f9e683e3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1386,6 +1386,10 @@ void i915_gem_context_open(struct drm_device *dev, struct drm_file *file); void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); int i915_switch_context(struct intel_ring_buffer *ring, struct drm_file *file, int to_id); +int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); /* i915_gem_gtt.c */ int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 6a7b67d9f43f..5642c4019b53 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -489,3 +489,58 @@ int i915_switch_context(struct intel_ring_buffer *ring, drm_gem_object_unreference(&from_obj->base); return ret; } + +int i915_gem_context_create_ioctl(struct 
drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_context_create *args = data; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_hw_context *ctx; + int ret; + + if (!(dev->driver->driver_features & DRIVER_GEM)) + return -ENODEV; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ret = create_hw_context(dev, file_priv, &ctx); + mutex_unlock(&dev->struct_mutex); + + args->ctx_id = ctx->id; + DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); + + return ret; +} + +int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_context_destroy *args = data; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_hw_context *ctx; + int ret; + + if (!(dev->driver->driver_features & DRIVER_GEM)) + return -ENODEV; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ctx = i915_gem_context_get(file_priv, args->ctx_id); + if (!ctx) { + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + + do_destroy(ctx); + + mutex_unlock(&dev->struct_mutex); + + DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id); + return 0; +} diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 5c28ee1d1911..5da73244486a 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -201,6 +201,8 @@ typedef struct _drm_i915_sarea { #define DRM_I915_GET_SPRITE_COLORKEY 0x2a #define DRM_I915_SET_SPRITE_COLORKEY 0x2b #define DRM_I915_GEM_WAIT 0x2c +#define DRM_I915_GEM_CONTEXT_CREATE 0x2d +#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -245,6 +247,8 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) #define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) +#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) +#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -897,4 +901,15 @@ struct drm_i915_gem_wait { __s64 timeout_ns; }; +struct drm_i915_gem_context_create { + /* output: id of new context*/ + __u32 ctx_id; + __u32 pad; +}; + +struct drm_i915_gem_context_destroy { + __u32 ctx_id; + __u32 pad; +}; + #endif /* _I915_DRM_H_ */ -- cgit v1.2.3 From 6e0a69dbc81b88f5a42e08344203021571f6fb2f Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Jun 2012 14:42:55 -0700 Subject: drm/i915/context: switch contexts with execbuf2 Use the rsvd1 field in execbuf2 to specify the context ID associated with the workload. This will allow the driver to do the proper context switch when/if needed. v2: Add checks for context switches on rings not supporting contexts. Before the code would silently ignore such requests. 
Signed-off-by: Ben Widawsky --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 16 ++++++++++++++++ include/drm/i915_drm.h | 8 +++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 974a9f1068a3..f32d02464bce 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1044,6 +1044,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_object *batch_obj; struct drm_clip_rect *cliprects = NULL; struct intel_ring_buffer *ring; + u32 ctx_id = i915_execbuffer2_get_context_id(*args); u32 exec_start, exec_len; u32 seqno; u32 mask; @@ -1065,9 +1066,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, break; case I915_EXEC_BSD: ring = &dev_priv->ring[VCS]; + if (ctx_id != 0) { + DRM_DEBUG("Ring %s doesn't support contexts\n", + ring->name); + return -EPERM; + } break; case I915_EXEC_BLT: ring = &dev_priv->ring[BCS]; + if (ctx_id != 0) { + DRM_DEBUG("Ring %s doesn't support contexts\n", + ring->name); + return -EPERM; + } break; default: DRM_DEBUG("execbuf with unknown ring: %d\n", @@ -1261,6 +1272,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } + ret = i915_switch_context(ring, file, ctx_id); + if (ret) + goto err; + trace_i915_gem_ring_dispatch(ring, seqno); exec_start = batch_obj->gtt_offset + args->batch_start_offset; @@ -1367,6 +1382,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, exec2.num_cliprects = args->num_cliprects; exec2.cliprects_ptr = args->cliprects_ptr; exec2.flags = I915_EXEC_RENDER; + i915_execbuffer2_set_context_id(exec2, 0); ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); if (!ret) { diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 5da73244486a..8cc70837f929 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -663,13 +663,19 @@ struct drm_i915_gem_execbuffer2 { #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ __u64 flags; - __u64 rsvd1; + __u64 rsvd1; /* now used for context info */ __u64 rsvd2; }; /** Resets the SO write offset registers for transform feedback on gen7. */ #define I915_EXEC_GEN7_SOL_RESET (1<<8) +#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) +#define i915_execbuffer2_set_context_id(eb2, context) \ + (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK +#define i915_execbuffer2_get_context_id(eb2) \ + ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK) + struct drm_i915_gem_pin { /** Handle of the buffer to be pinned. */ __u32 handle; -- cgit v1.2.3 From 8e96d9c4d9843f00ebeb4a9b33596d96602ea101 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Jun 2012 14:42:56 -0700 Subject: drm/i915: reset the GPU on context fini It's the only way we know how to make the GPU actually forget about the default context. 
Signed-off-by: Ben Widawsky --- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem_context.c | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 5d9e07ece3c2..f0bd30ab1551 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -799,7 +799,7 @@ static int gen6_do_reset(struct drm_device *dev) return ret; } -static int intel_gpu_reset(struct drm_device *dev) +int intel_gpu_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int ret = -ENODEV; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 03e7f9e683e3..6f29fd141ab0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1184,6 +1184,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, extern int i915_emit_box(struct drm_device *dev, struct drm_clip_rect *box, int DR1, int DR4); +extern int intel_gpu_reset(struct drm_device *dev); extern int i915_reset(struct drm_device *dev); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 5642c4019b53..6dc426a48b2d 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -278,6 +278,8 @@ void i915_gem_context_fini(struct drm_device *dev) i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj); do_destroy(dev_priv->ring[RCS].default_context); + + intel_gpu_reset(dev); } void i915_gem_context_open(struct drm_device *dev, struct drm_file *file) -- cgit v1.2.3 From aaa377302b2994fcc2c66741b47da33feb489dca Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 16 Jun 2012 15:30:32 +0200 Subject: drm/i915/crt: Do not rely upon the HPD presence pin VGA hotplug detection "works" by measuring the resistance across certain pins. A lot of kvm switches fumble this and wire up cheap resistors with the wrong resistance or don't bother at all. To accommodate these, also try to detect a connected monitor by trying to grab the edid. Contrary to !HAS_HOTPLUG platforms we don't bother with an actual load-detection cycle when the output is live - that would be actual work to implement because things moved around. This is the big difference to Chris Wilson's original approach: commit 9e612a008fa7fe493a473454def56aa321479495 Author: Chris Wilson Date: Thu May 31 13:08:53 2012 +0100 drm/i915/crt: Do not rely upon the HPD presence pin This blew up on Linus' machine because it erroneously detected a vga screen (without an edid and hence only the default modes), leading to its prompt removal: commit 8f53369b753f5f4c7684c2eb0b592152abb1dd00 Author: Linus Torvalds Date: Fri Jun 8 14:53:06 2012 -0700 Revert "drm/i915/crt: Do not rely upon the HPD presence pin" Some digging around in Bspec shows the reason why load detect doesn't work on newer chips - the legacy VGA load detect bit isn't wired up any longer: Public Snb Bspec, Vol3 Part1, 1.1.1 ST00 Input Status 0, bit4: "RGB Comparator / Sense. This bit is here for compatibility and will always return one. Monitor detection must be done through the programming of registers in the MMIO space. 
0 = Below threshold 1 = Above threshold" v2: Add a comment in the code that load detect on hotplug capable machines is broken and pimp the commit message with a quote of Bspec to show why. Reported-and-tested-by: Matthieu LAVIE Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=50501 Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_crt.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 75a70c46ef1b..5978490dac92 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -453,18 +453,27 @@ intel_crt_detect(struct drm_connector *connector, bool force) struct intel_load_detect_pipe tmp; if (I915_HAS_HOTPLUG(dev)) { + /* We can not rely on the HPD pin always being correctly wired + * up, for example many KVM do not pass it through, and so + * only trust an assertion that the monitor is connected. + */ if (intel_crt_detect_hotplug(connector)) { DRM_DEBUG_KMS("CRT detected via hotplug\n"); return connector_status_connected; - } else { + } else DRM_DEBUG_KMS("CRT not detected via hotplug\n"); - return connector_status_disconnected; - } } if (intel_crt_detect_ddc(connector)) return connector_status_connected; + /* Load detection is broken on HPD capable machines. Whoever wants a + * broken monitor (without edid) to work behind a broken kvm (that fails + * to have the right resistors for HP detection) needs to fix this up. + * For now just bail out. */ + if (I915_HAS_HOTPLUG(dev)) + return connector_status_disconnected; + if (!force) return connector->status; -- cgit v1.2.3 From 39fb50f6178d7dbd70142b25193d722ebf3f8193 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Fri, 8 Jun 2012 16:43:19 -0300 Subject: drm/i915: properly wait for SBI status Somehow this went unnoticed in the past reviews, but the condition would never time out properly. This was initially introduced in v2 of the original SBI enabling patch. Highly embarrassing. Note that we now actually time out for the read, which resulted in gcc complaining that we can now return uninitialized garbage if that happens. There's not much we can do here because there's not much point in threading -EIO all the way down through these functions. Hence simply shut up the compiler. Reported-by: Dan Carpenter Signed-off-by: Eugeni Dodonov [danvet: Added note and squashed uninitialized value shut-up into this patch.] 
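To make the fix easier to see outside the diff context, here is an illustrative sketch of the corrected polling idiom; the intel_sbi_wait_idle() helper name is made up, SBI_CTL_STAT, SBI_BUSY, SBI_RESPONSE_FAIL and wait_for() are the driver's own symbols, and the authoritative change is the diff below. The point is that the code must wait for the hardware to deassert SBI_BUSY, rather than test SBI_READY, and then check SBI_RESPONSE_FAIL once the transaction completes.

/* Sketch only, assuming it lives in intel_display.c where wait_for() and
 * I915_READ() are available. */
static int intel_sbi_wait_idle(struct drm_i915_private *dev_priv)
{
	/* wait_for() returns non-zero once the ms budget is exhausted */
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100))
		return -ETIMEDOUT;

	if (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL)
		return -EIO;

	return 0;
}

In the actual functions the error is only logged rather than propagated, which is why intel_sbi_read() now initializes value to 0 to silence gcc.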
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 308e1a2967e7..97301621f48a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1350,7 +1350,7 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value) unsigned long flags; spin_lock_irqsave(&dev_priv->dpio_lock, flags); - if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0, + if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) { DRM_ERROR("timeout waiting for SBI to become ready\n"); goto out_unlock; @@ -1364,7 +1364,7 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value) SBI_BUSY | SBI_CTL_OP_CRWR); - if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0, + if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 100)) { DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); goto out_unlock; @@ -1378,10 +1378,10 @@ static u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg) { unsigned long flags; - u32 value; + u32 value = 0; spin_lock_irqsave(&dev_priv->dpio_lock, flags); - if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0, + if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) { DRM_ERROR("timeout waiting for SBI to become ready\n"); goto out_unlock; @@ -1393,7 +1393,7 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg) SBI_BUSY | SBI_CTL_OP_CRRD); - if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0, + if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 100)) { DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); goto out_unlock; -- cgit v1.2.3 From e158c5aa1776372cd751e2c395300a3a6ff0bc9c Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Sun, 17 Jun 2012 09:37:24 -0700 Subject: drm/i915: disable contexts on old HW This got dropped as a result of the last round of comments. I didn't test it on unsupported HW (which this is likely the case). Note that this prevents hw context from blowing up on any pre-gen6 hw. Signed-off-by: Ben Widawsky Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=51142 [danvet: Added note and buglink.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_context.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 6dc426a48b2d..8fb8cd8f6320 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -243,8 +243,10 @@ void i915_gem_context_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t ctx_size; - if (!HAS_HW_CONTEXTS(dev)) + if (!HAS_HW_CONTEXTS(dev)) { + dev_priv->hw_contexts_disabled = true; return; + } /* If called from reset, or thaw... we've been here already */ if (dev_priv->hw_contexts_disabled || -- cgit v1.2.3 From 208482232de3590cee4757dfabe5d8cee8c6e626 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 4 May 2012 18:58:59 -0700 Subject: drm/i915: set IDICOS to medium uncore resources I'm seeing about a 5% FPS improvement across various benchmarks on my IVB i3. Rumor has it that the higher end parts show even more benefit. This derives from a patch originally given to me by Bernard. The docs are confusing about the definition names (ie. 
medium really seems like max), but it would seem it gives more cache to the GT at the expense of uncore. This configuration makes the split most in favor of the GT. I've not tried the other IDICOS values. Cc: "Kilarski, Bernard R" Acked-by: Eric Anholt Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b7de5ea62aa4..404b474eaea3 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3384,6 +3384,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + uint32_t snpcr; I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); @@ -3429,6 +3430,11 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) /* WaDisable4x2SubspanOptimization */ I915_WRITE(CACHE_MODE_1, _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); + + snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); + snpcr &= ~GEN6_MBC_SNPCR_MASK; + snpcr |= GEN6_MBC_SNPCR_MED; + I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); } static void valleyview_init_clock_gating(struct drm_device *dev) -- cgit v1.2.3 From e080b915ecea3fff3e0d8d380a221677add419a5 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 16 Jun 2012 18:29:44 +0200 Subject: drm/i915: fixup hangman rebase goof-up I've added a bit of logic such that running the hangman test on chips without any hw reset support at all doesn't wedge the gpu because the reset failed. This relied on checking for non-null stop_rings. Unfortunately I've botched a rebase somewhere and stop_rings is still cleared at the old place before the reset code. Fix this up so that running the i-g-t tests on gen2/3 doesn't result in a wedged gpu. v2: Actually remove the lines instead of adding them twice ... my git license should be revoked immediately. Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f0bd30ab1551..e41aadef9937 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -860,8 +860,6 @@ int i915_reset(struct drm_device *dev) if (!mutex_trylock(&dev->struct_mutex)) return -EBUSY; - dev_priv->stop_rings = 0; - i915_gem_reset(dev); ret = -ENODEV; -- cgit v1.2.3 From 0f846f81a154cc1818416918d22939bda73da194 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 14 Jun 2012 11:04:47 -0700 Subject: drm/i915: disable RCBP and VDS unit clock gating on SNB and VLV The RCBP workaround still applies on these chips, and we need VDS as well. 
v2: remove MB boot fetch that snuck in (Daniel) add workaround tags to comments for easier internal tracking (Daniel) v3: only apply RCPB and VDS on SNB and VLV, IVB doesn't need them (Eugeni) References: https://bugs.freedesktop.org/show_bug.cgi?id=50251 Signed-off-by: Jesse Barnes Reviewed-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 54 +++++++++++++++++++++++++++++++++-------- 2 files changed, 45 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 60562f0c7018..e748f665a0d9 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4037,6 +4037,7 @@ # define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) #define GEN6_UCGCTL2 0x9404 +# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30) # define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13) # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 404b474eaea3..54c5e53dd415 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3330,8 +3330,12 @@ static void gen6_init_clock_gating(struct drm_device *dev) * * According to the spec, bit 11 (RCCUNIT) must also be set, * but we didn't debug actual testcases to find it out. + * + * Also apply WaDisableVDSUnitClockGating and + * WaDisableRCPBUnitClockGating. */ I915_WRITE(GEN6_UCGCTL2, + GEN7_VDSUNIT_CLOCK_GATE_DISABLE | GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | GEN6_RCCUNIT_CLOCK_GATE_DISABLE); @@ -3392,11 +3396,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); - /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. - * This implements the WaDisableRCZUnitClockGating workaround. - */ - I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); - I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); I915_WRITE(IVB_CHICKEN3, @@ -3413,6 +3412,23 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock + * gating disable must be set. Failure to set it results in + * flickering pixels due to Z write ordering failures after + * some amount of runtime in the Mesa "fire" demo, and Unigine + * Sanctuary and Tropics, and apparently anything else with + * alpha test or pixel discard. + * + * According to the spec, bit 11 (RCCUNIT) must also be set, + * but we didn't debug actual testcases to find it out. + * + * According to the spec, bit 13 (RCZUNIT) must be set on IVB. + * This implements the WaDisableRCZUnitClockGating workaround. + */ + I915_WRITE(GEN6_UCGCTL2, + GEN6_RCZUNIT_CLOCK_GATE_DISABLE | + GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + /* This is required by WaCatErrorRejectionIssue */ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | @@ -3449,11 +3465,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); - /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. - * This implements the WaDisableRCZUnitClockGating workaround. 
- */ - I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); - I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); I915_WRITE(IVB_CHICKEN3, @@ -3473,6 +3484,29 @@ static void valleyview_init_clock_gating(struct drm_device *dev) I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); + + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock + * gating disable must be set. Failure to set it results in + * flickering pixels due to Z write ordering failures after + * some amount of runtime in the Mesa "fire" demo, and Unigine + * Sanctuary and Tropics, and apparently anything else with + * alpha test or pixel discard. + * + * According to the spec, bit 11 (RCCUNIT) must also be set, + * but we didn't debug actual testcases to find it out. + * + * According to the spec, bit 13 (RCZUNIT) must be set on IVB. + * This implements the WaDisableRCZUnitClockGating workaround. + * + * Also apply WaDisableVDSUnitClockGating and + * WaDisableRCPBUnitClockGating. + */ + I915_WRITE(GEN6_UCGCTL2, + GEN7_VDSUNIT_CLOCK_GATE_DISABLE | + GEN6_RCZUNIT_CLOCK_GATE_DISABLE | + GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | + GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + for_each_pipe(pipe) { I915_WRITE(DSPCNTR(pipe), I915_READ(DSPCNTR(pipe)) | -- cgit v1.2.3 From b4ae3f22d238617ca11610b29fde16cf8c0bc6e0 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 14 Jun 2012 11:04:48 -0700 Subject: drm/i915: load boot context at driver init time According to the bspec for MBCTL: Driver must set bit in the following scenarios: - to realod teh h/w boot context every time it gets loaded through OS - after an FLR clears the register (BIOS won't run afterwards) References: https://bugs.freedesktop.org/show_bug.cgi?id=50237 Signed-off-by: Jesse Barnes Reviewed-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 54c5e53dd415..83c0e226f50c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3363,6 +3363,9 @@ static void gen6_init_clock_gating(struct drm_device *dev) ILK_DPARB_CLK_GATE | ILK_DPFD_CLK_GATE); + I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | + GEN6_MBCTL_ENABLE_BOOT_FETCH); + for_each_pipe(pipe) { I915_WRITE(DSPCNTR(pipe), I915_READ(DSPCNTR(pipe)) | @@ -3441,6 +3444,9 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) intel_flush_display_plane(dev_priv, pipe); } + I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | + GEN6_MBCTL_ENABLE_BOOT_FETCH); + gen7_setup_fixed_func_scheduler(dev_priv); /* WaDisable4x2SubspanOptimization */ @@ -3484,6 +3490,9 @@ static void valleyview_init_clock_gating(struct drm_device *dev) I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); + I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | + GEN6_MBCTL_ENABLE_BOOT_FETCH); + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock * gating disable must be set. Failure to set it results in -- cgit v1.2.3 From 6edaa7fcf287b92fb231a9e23cd6b5b0fc3dddb2 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 14 Jun 2012 11:04:49 -0700 Subject: drm/i915: add TDL unit clock gating disable for VLV Another required workaround for a potential hang: WaDisableTDLUnitClockGating. 
v2: only apply this to VLV, IVB doesn't need it anymore (Eugeni) References: https://bugs.freedesktop.org/show_bug.cgi?id=50245 Signed-off-by: Jesse Barnes Reviewed-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e748f665a0d9..5d7cf5f7e750 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4038,6 +4038,7 @@ #define GEN6_UCGCTL2 0x9404 # define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30) +# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22) # define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13) # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 83c0e226f50c..47c1a3e9b4af 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3512,6 +3512,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev) */ I915_WRITE(GEN6_UCGCTL2, GEN7_VDSUNIT_CLOCK_GATE_DISABLE | + GEN7_TDLUNIT_CLOCK_GATE_DISABLE | GEN6_RCZUNIT_CLOCK_GATE_DISABLE | GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | GEN6_RCCUNIT_CLOCK_GATE_DISABLE); -- cgit v1.2.3 From e3f33d46fd917747e966f8e6d25f2940223ad1ee Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 14 Jun 2012 11:04:50 -0700 Subject: drm/i915: add L3 bank clock gating disable on VLV Prevents a possible hang: WaDisableL3Bank2xClockGate. v2: only apply to VLV, IVB doesn't need this anymore References: https://bugs.freedesktop.org/show_bug.cgi?id=50245 Signed-off-by: Jesse Barnes Reviewed-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 3 +++ drivers/gpu/drm/i915/intel_pm.c | 2 ++ 2 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5d7cf5f7e750..782e5d1dc218 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4043,6 +4043,9 @@ # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) +#define GEN7_UCGCTL4 0x940c +#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) + #define GEN6_RPNSWREQ 0xA008 #define GEN6_TURBO_DISABLE (1<<31) #define GEN6_FREQUENCY(x) ((x)<<25) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 47c1a3e9b4af..17c16f026244 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3517,6 +3517,8 @@ static void valleyview_init_clock_gating(struct drm_device *dev) GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE); + for_each_pipe(pipe) { I915_WRITE(DSPCNTR(pipe), I915_READ(DSPCNTR(pipe)) | -- cgit v1.2.3 From 55a6662837a5efe48c836bfc3570ace348f3db09 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 19 Jun 2012 21:55:32 +0200 Subject: drm/i915: fix module unload after context merge commit 8e96d9c4d9843f00ebeb4a9b33596d96602ea101 Author: Ben Widawsky Date: Mon Jun 4 14:42:56 2012 -0700 drm/i915: reset the GPU on context fini broke module unload because it reset the gpu before we've stopped touching it. Later on in the unload sequence the ringbuffer code complained that the gpu would idle properly (because intel_gpu_reset only resets the hw and not our sw state). 
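As a rough sketch of the teardown ordering this fix establishes (simplified, with a hypothetical function name; the real sequence lives in i915_driver_unload() in the diff below): the gpu has to stop being used before the default context's backing storage goes away, so the reset now happens inside the context teardown, which runs last.

/* Simplified sketch of the intended unload ordering, not the literal code. */
static void i915_gem_unload_sketch(struct drm_device *dev)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);	/* stop all rings first */
	i915_gem_context_fini(dev);		/* resets the gpu, then drops the default context */
	mutex_unlock(&dev->struct_mutex);
}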
v2: Reorder things so that we reset the gpu _before_ we release the backing storage of the default context. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=51183 Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/i915_gem_context.c | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index ba75af12f1fd..9913a77478a3 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1669,7 +1669,6 @@ int i915_driver_unload(struct drm_device *dev) if (ret) DRM_ERROR("failed to idle hardware: %d\n", ret); i915_gem_retire_requests(dev); - i915_gem_context_fini(dev); mutex_unlock(&dev->struct_mutex); /* Cancel the retire work handler, which should be idle now. */ @@ -1720,6 +1719,7 @@ int i915_driver_unload(struct drm_device *dev) mutex_lock(&dev->struct_mutex); i915_gem_free_all_phys_object(dev); i915_gem_cleanup_ringbuffer(dev); + i915_gem_context_fini(dev); mutex_unlock(&dev->struct_mutex); i915_gem_cleanup_aliasing_ppgtt(dev); i915_gem_cleanup_stolen(dev); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 8fb8cd8f6320..48e41df075b9 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -277,11 +277,14 @@ void i915_gem_context_fini(struct drm_device *dev) if (dev_priv->hw_contexts_disabled) return; + /* The only known way to stop the gpu from accessing the hw context is + * to reset it. Do this as the very last operation to avoid confusing + * other code, leading to spurious errors. */ + intel_gpu_reset(dev); + i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj); do_destroy(dev_priv->ring[RCS].default_context); - - intel_gpu_reset(dev); } void i915_gem_context_open(struct drm_device *dev, struct drm_file *file) -- cgit v1.2.3 From df12c6d5ec9b88fc45c672b77dac015327dd8497 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 19 Jun 2012 16:52:30 +0200 Subject: drm/i915: initialize the context idr unconditionally It doesn't hurt and it at least prevents us from OOPSing left and right at quite a few places. This also allows us to simplify the code a bit by folding the only line of context_open into the callsite. We obviously also need to run the cleanup code unconditionally, too. 
Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_gem_context.c | 15 --------------- 3 files changed, 1 insertion(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 9913a77478a3..9563ab8390e7 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1759,7 +1759,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file) spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); - i915_gem_context_open(dev, file); + idr_init(&file_priv->context_idr); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6f29fd141ab0..82e657eafd55 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1383,7 +1383,6 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev, /* i915_gem_context.c */ void i915_gem_context_init(struct drm_device *dev); void i915_gem_context_fini(struct drm_device *dev); -void i915_gem_context_open(struct drm_device *dev, struct drm_file *file); void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); int i915_switch_context(struct intel_ring_buffer *ring, struct drm_file *file, int to_id); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 48e41df075b9..047f81c206b3 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -287,17 +287,6 @@ void i915_gem_context_fini(struct drm_device *dev) do_destroy(dev_priv->ring[RCS].default_context); } -void i915_gem_context_open(struct drm_device *dev, struct drm_file *file) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_file_private *file_priv = file->driver_priv; - - if (dev_priv->hw_contexts_disabled) - return; - - idr_init(&file_priv->context_idr); -} - static int context_idr_cleanup(int id, void *p, void *data) { struct drm_file *file = (struct drm_file *)data; @@ -316,12 +305,8 @@ static int context_idr_cleanup(int id, void *p, void *data) void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_file_private *file_priv = file->driver_priv; - if (dev_priv->hw_contexts_disabled) - return; - mutex_lock(&dev->struct_mutex); idr_for_each(&file_priv->context_idr, context_idr_cleanup, file); idr_destroy(&file_priv->context_idr); -- cgit v1.2.3 From 0d326013763bd4610cb72d9e2fa7f8ff2f414995 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 19 Jun 2012 16:52:31 +0200 Subject: drm/i915: return -ENOENT if the context doesn't exist This is our customary "no such object" errno, not -EINVAL. 
Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_context.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 047f81c206b3..f775d861f052 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -460,7 +460,7 @@ int i915_switch_context(struct intel_ring_buffer *ring, } else { to = i915_gem_context_get(file_priv, to_id); if (to == NULL) - return -EINVAL; + return -ENOENT; } if (from_obj == to->obj) @@ -526,7 +526,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, ctx = i915_gem_context_get(file_priv, args->ctx_id); if (!ctx) { mutex_unlock(&dev->struct_mutex); - return -EINVAL; + return -ENOENT; } do_destroy(ctx); -- cgit v1.2.3 From 6f4c45c12c6775b8158fc143115037d084df12c3 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 19 Jun 2012 16:52:32 +0200 Subject: drm/i915/context: shut up compiler It found some unused variables. Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_context.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index f775d861f052..a52fd21e4807 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -485,7 +485,6 @@ int i915_switch_context(struct intel_ring_buffer *ring, int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_context_create *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_hw_context *ctx; @@ -512,7 +511,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_context_destroy *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; - struct drm_i915_private *dev_priv = dev->dev_private; struct i915_hw_context *ctx; int ret; -- cgit v1.2.3 From 73c273eb7547d2557b49f9d1f42e298145c1e635 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 19 Jun 2012 20:27:39 +0200 Subject: drm/i915: simplify context_idr_cleanup The idr code already passes us the pointer associated with that id, so no need to look it up again. Also, we'll kill the idr right away, so there's no issue with leaving these dangling pointers behind - the current code does the same. v2: Also drop the file argument, spotted by Ben Widawsky. 
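For context, a small self-contained sketch of the idr_for_each() contract this change leans on (the walk_contexts() and show_entry() names are made up for illustration): the callback is handed the pointer stored for each id, so a second lookup by id is redundant.

#include <linux/idr.h>
#include <linux/kernel.h>

/* Called once per allocated id; ptr is whatever was stored for that id. */
static int show_entry(int id, void *ptr, void *data)
{
	pr_info("id %d -> %p\n", id, ptr);
	return 0;	/* returning non-zero would abort the walk */
}

static void walk_contexts(struct idr *idr)
{
	idr_for_each(idr, show_entry, NULL);
}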
Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_context.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index a52fd21e4807..1b5041c52569 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -289,14 +289,9 @@ void i915_gem_context_fini(struct drm_device *dev) static int context_idr_cleanup(int id, void *p, void *data) { - struct drm_file *file = (struct drm_file *)data; - struct drm_i915_file_private *file_priv = file->driver_priv; - struct i915_hw_context *ctx; + struct i915_hw_context *ctx = p; BUG_ON(id == DEFAULT_CONTEXT_ID); - ctx = i915_gem_context_get(file_priv, id); - if (WARN_ON(ctx == NULL)) - return -ENXIO; do_destroy(ctx); @@ -308,7 +303,7 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) struct drm_i915_file_private *file_priv = file->driver_priv; mutex_lock(&dev->struct_mutex); - idr_for_each(&file_priv->context_idr, context_idr_cleanup, file); + idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); idr_destroy(&file_priv->context_idr); mutex_unlock(&dev->struct_mutex); } -- cgit v1.2.3 From 5fa8be65e950e4f2a4e7dc31f823fca41b9e650e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 19 Jun 2012 17:16:01 +0200 Subject: drm/i915: return -ENODEV if hw context are not supported Otherwise userspace has no way to figure this out. Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_context.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 1b5041c52569..e58e8366f473 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -480,6 +480,7 @@ int i915_switch_context(struct intel_ring_buffer *ring, int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_context_create *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_hw_context *ctx; @@ -488,6 +489,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; + if (dev_priv->hw_contexts_disabled) + return -ENODEV; + ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; -- cgit v1.2.3 From 8e88a2bd5987178d16d53686197404e149e996d9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 19 Jun 2012 18:40:00 +0200 Subject: drm/i915: don't call modeset_init_hw in i915_reset It seems to blow up my ilk in all kinds of strange ways. And now that we're no longer resetting the entire modeset state, it shouldn't be necessary any longer. This essentially reverts commit f817586cebf1b946d1f327f9a596048efd6b64e9 Author: Daniel Vetter Date: Tue Apr 10 15:50:11 2012 +0200 drm/i915: re-init modeset hw state after gpu reset safe for the introduction of modeset_init_hw, that one is nice to prevent code duplication between driver load and resume. v2: Add a comment to the code to warn future travellers of the dragon dungeon ahead, suggested by Chris Wilson. 
Acked-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index e41aadef9937..d7dd60bb2745 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -904,10 +904,13 @@ int i915_reset(struct drm_device *dev) i915_gem_context_init(dev); i915_gem_init_ppgtt(dev); - mutex_unlock(&dev->struct_mutex); + /* + * It would make sense to re-init all the other hw state, at + * least the rps/rc6/emon init done within modeset_init_hw. For + * some unknown reason, this blows up my ilk, so don't. + */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) - intel_modeset_init_hw(dev); + mutex_unlock(&dev->struct_mutex); drm_irq_uninstall(dev); drm_irq_install(dev); -- cgit v1.2.3 From cc889e0f6ce6a63c62db17d702ecfed86d58083f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 13 Jun 2012 20:45:19 +0200 Subject: drm/i915: disable flushing_list/gpu_write_list This is just the minimal patch to disable all this code so that we can do decent amounts of QA before we rip it all out. The complicating thing is that we need to flush the gpu caches after the batchbuffer is emitted. Which is past the point of no return where execbuffer can't fail any more (otherwise we risk submitting the same batch multiple times). Hence we need to add a flag to track whether any caches associated with that ring are dirty. And emit the flush in add_request if that's the case. Note that this has a quite a few behaviour changes: - Caches get flushed/invalidated unconditionally. - Invalidation now happens after potential inter-ring sync. I've bantered around a bit with Chris on irc whether this fixes anything, and it might or might not. The only thing clear is that with these changes it's much easier to reason about correctness. Also rip out a lone get_next_request_seqno in the execbuffer retire_commands function. I've dug around and I couldn't figure out why that is still there, with the outstanding lazy request stuff it shouldn't be necessary. v2: Chris Wilson complained that I also invalidate the read caches when flushing after a batchbuffer. Now optimized. v3: Added some comments to explain the new flushing behaviour. Cc: Eric Anholt Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 25 +++++++++++--- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 52 +++++++----------------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 1 + 3 files changed, 33 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3596f3415624..6a98c0659324 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1568,6 +1568,21 @@ i915_add_request(struct intel_ring_buffer *ring, int was_empty; int ret; + /* + * Emit any outstanding flushes - execbuf can fail to emit the flush + * after having emitted the batchbuffer command. Hence we need to fix + * things up similar to emitting the lazy request. The difference here + * is that the flush _must_ happen before the next request, no matter + * what. 
+ */ + if (ring->gpu_caches_dirty) { + ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS); + if (ret) + return ret; + + ring->gpu_caches_dirty = false; + } + BUG_ON(request == NULL); seqno = i915_gem_next_request_seqno(ring); @@ -1613,6 +1628,9 @@ i915_add_request(struct intel_ring_buffer *ring, queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); } + + WARN_ON(!list_empty(&ring->gpu_write_list)); + return 0; } @@ -1827,14 +1845,11 @@ i915_gem_retire_work_handler(struct work_struct *work) */ idle = true; for_each_ring(ring, dev_priv, i) { - if (!list_empty(&ring->gpu_write_list)) { + if (ring->gpu_caches_dirty) { struct drm_i915_gem_request *request; - int ret; - ret = i915_gem_flush_ring(ring, - 0, I915_GEM_GPU_DOMAINS); request = kzalloc(sizeof(*request), GFP_KERNEL); - if (ret || request == NULL || + if (request == NULL || i915_add_request(ring, NULL, request)) kfree(request); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index f32d02464bce..88e2e114189c 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -810,33 +810,16 @@ err: return ret; } -static int +static void i915_gem_execbuffer_flush(struct drm_device *dev, uint32_t invalidate_domains, - uint32_t flush_domains, - uint32_t flush_rings) + uint32_t flush_domains) { - drm_i915_private_t *dev_priv = dev->dev_private; - int i, ret; - if (flush_domains & I915_GEM_DOMAIN_CPU) intel_gtt_chipset_flush(); if (flush_domains & I915_GEM_DOMAIN_GTT) wmb(); - - if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { - for (i = 0; i < I915_NUM_RINGS; i++) - if (flush_rings & (1 << i)) { - ret = i915_gem_flush_ring(&dev_priv->ring[i], - invalidate_domains, - flush_domains); - if (ret) - return ret; - } - } - - return 0; } static int @@ -885,12 +868,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, i915_gem_object_set_to_gpu_domain(obj, ring, &cd); if (cd.invalidate_domains | cd.flush_domains) { - ret = i915_gem_execbuffer_flush(ring->dev, - cd.invalidate_domains, - cd.flush_domains, - cd.flush_rings); - if (ret) - return ret; + i915_gem_execbuffer_flush(ring->dev, + cd.invalidate_domains, + cd.flush_domains); } if (cd.flips) { @@ -905,6 +885,11 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, return ret; } + /* Unconditionally invalidate gpu caches. */ + ret = i915_gem_flush_ring(ring, I915_GEM_GPU_DOMAINS, 0); + if (ret) + return ret; + return 0; } @@ -983,26 +968,13 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) { struct drm_i915_gem_request *request; - u32 invalidate; - /* - * Ensure that the commands in the batch buffer are - * finished before the interrupt fires. - * - * The sampler always gets flushed on i965 (sigh). - */ - invalidate = I915_GEM_DOMAIN_COMMAND; - if (INTEL_INFO(dev)->gen >= 4) - invalidate |= I915_GEM_DOMAIN_SAMPLER; - if (ring->flush(ring, invalidate, 0)) { - i915_gem_next_request_seqno(ring); - return; - } + /* Unconditionally force add_request to emit a full flush. 
*/ + ring->gpu_caches_dirty = true; /* Add a breadcrumb for the completion of the batch buffer */ request = kzalloc(sizeof(*request), GFP_KERNEL); if (request == NULL || i915_add_request(ring, file, request)) { - i915_gem_next_request_seqno(ring); kfree(request); } } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 594c9c4ad396..1d3c81fdad92 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -113,6 +113,7 @@ struct intel_ring_buffer { * Do we have some not yet emitted requests outstanding? */ u32 outstanding_lazy_request; + bool gpu_caches_dirty; wait_queue_head_t irq_queue; -- cgit v1.2.3 From a0c4da24eafb32a3ce44f37b7c3412c6ffb6e37c Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 15 Jun 2012 11:55:13 -0700 Subject: drm/i915: ValleyView mode setting limits and PLL functions Add some VLV limit structures and update the PLL code. v2: resolve conflicts, Vijay to re-post with PLL valid checks and fixed limits v3: re-add dpio write function v4: squash in Vijay's fixes for the PLL limits and clean up the m/n finder Signed-off-by: Shobhit Kumar Signed-off-by: Vijay Purushothaman Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_display.c | 240 ++++++++++++++++++++++++++++++++++- 2 files changed, 239 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 782e5d1dc218..b6f5f1040d77 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -904,6 +904,7 @@ #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ +#define DPLL_LOCK_VLV (1<<15) #define DPLL_INTEGRATED_CLOCK_VLV (1<<13) #define SRX_INDEX 0x3c4 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 97301621f48a..7794c1a630c0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -98,6 +98,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock); +static bool +intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock); + static inline u32 /* units of 100MHz */ intel_fdi_link_freq(struct drm_device *dev) { @@ -359,6 +364,48 @@ static const intel_limit_t intel_limits_ironlake_display_port = { .find_pll = intel_find_pll_ironlake_dp, }; +static const intel_limit_t intel_limits_vlv_dac = { + .dot = { .min = 25000, .max = 270000 }, + .vco = { .min = 4000000, .max = 6000000 }, + .n = { .min = 1, .max = 7 }, + .m = { .min = 22, .max = 450 }, /* guess */ + .m1 = { .min = 2, .max = 3 }, + .m2 = { .min = 11, .max = 156 }, + .p = { .min = 10, .max = 30 }, + .p1 = { .min = 2, .max = 3 }, + .p2 = { .dot_limit = 270000, + .p2_slow = 2, .p2_fast = 20 }, + .find_pll = intel_vlv_find_best_pll, +}; + +static const intel_limit_t intel_limits_vlv_hdmi = { + .dot = { .min = 20000, .max = 165000 }, + .vco = { .min = 5994000, .max = 4000000 }, + .n = { .min = 1, .max = 7 }, + .m = { .min = 60, .max = 300 }, /* guess */ + .m1 = { .min = 2, .max = 3 }, + .m2 = { .min = 11, .max = 156 }, + .p = { .min = 10, .max = 30 }, + .p1 = { .min = 2, .max = 3 }, + .p2 = { .dot_limit = 270000, + .p2_slow = 
2, .p2_fast = 20 }, + .find_pll = intel_vlv_find_best_pll, +}; + +static const intel_limit_t intel_limits_vlv_dp = { + .dot = { .min = 162000, .max = 270000 }, + .vco = { .min = 5994000, .max = 4000000 }, + .n = { .min = 1, .max = 7 }, + .m = { .min = 60, .max = 300 }, /* guess */ + .m1 = { .min = 2, .max = 3 }, + .m2 = { .min = 11, .max = 156 }, + .p = { .min = 10, .max = 30 }, + .p1 = { .min = 2, .max = 3 }, + .p2 = { .dot_limit = 270000, + .p2_slow = 2, .p2_fast = 20 }, + .find_pll = intel_vlv_find_best_pll, +}; + u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) { unsigned long flags; @@ -384,6 +431,28 @@ out_unlock: return val; } +static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, + u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&dev_priv->dpio_lock, flags); + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { + DRM_ERROR("DPIO idle wait timed out\n"); + goto out_unlock; + } + + I915_WRITE(DPIO_DATA, val); + I915_WRITE(DPIO_REG, reg); + I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID | + DPIO_BYTE); + if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) + DRM_ERROR("DPIO write wait timed out\n"); + +out_unlock: + spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); +} + static void vlv_init_dpio(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -510,6 +579,13 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) limit = &intel_limits_pineview_lvds; else limit = &intel_limits_pineview_sdvo; + } else if (IS_VALLEYVIEW(dev)) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) + limit = &intel_limits_vlv_dac; + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) + limit = &intel_limits_vlv_hdmi; + else + limit = &intel_limits_vlv_dp; } else if (!IS_GEN2(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits_i9xx_lvds; @@ -783,6 +859,73 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, memcpy(best_clock, &clock, sizeof(intel_clock_t)); return true; } +static bool +intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock) +{ + u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; + u32 m, n, fastclk; + u32 updrate, minupdate, fracbits, p; + unsigned long bestppm, ppm, absppm; + int dotclk, flag; + + dotclk = target * 1000; + bestppm = 1000000; + ppm = absppm = 0; + fastclk = dotclk / (2*100); + updrate = 0; + minupdate = 19200; + fracbits = 1; + n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0; + bestm1 = bestm2 = bestp1 = bestp2 = 0; + + /* based on hardware requirement, prefer smaller n to precision */ + for (n = limit->n.min; n <= ((refclk) / minupdate); n++) { + updrate = refclk / n; + for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) { + for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) { + if (p2 > 10) + p2 = p2 - 1; + p = p1 * p2; + /* based on hardware requirement, prefer bigger m1,m2 values */ + for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) { + m2 = (((2*(fastclk * p * n / m1 )) + + refclk) / (2*refclk)); + m = m1 * m2; + vco = updrate * m; + if (vco >= limit->vco.min && vco < limit->vco.max) { + ppm = 1000000 * ((vco / p) - fastclk) / fastclk; + absppm = (ppm > 0) ? 
ppm : (-ppm); + if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) { + bestppm = 0; + flag = 1; + } + if (absppm < bestppm - 10) { + bestppm = absppm; + flag = 1; + } + if (flag) { + bestn = n; + bestm1 = m1; + bestm2 = m2; + bestp1 = p1; + bestp2 = p2; + flag = 0; + } + } + } + } + } + } + best_clock->n = bestn; + best_clock->m1 = bestm1; + best_clock->m2 = bestm2; + best_clock->p1 = bestp1; + best_clock->p2 = bestp2; + + return true; +} static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) { @@ -1293,7 +1436,7 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) u32 val; /* No really, not for ILK+ */ - BUG_ON(dev_priv->info->gen >= 5); + BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5); /* PLL is protected by panel, make sure we can write it */ if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) @@ -3672,13 +3815,37 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, return display_bpc != bpc; } +static int vlv_get_refclk(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int refclk = 27000; /* for DP & HDMI */ + + return 100000; /* only one validated so far */ + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { + refclk = 96000; + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_panel_use_ssc(dev_priv)) + refclk = 100000; + else + refclk = 96000; + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { + refclk = 100000; + } + + return refclk; +} + static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; int refclk; - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && + if (IS_VALLEYVIEW(dev)) { + refclk = vlv_get_refclk(crtc); + } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { refclk = dev_priv->lvds_ssc_freq * 1000; DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", @@ -3793,6 +3960,72 @@ static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock, I915_WRITE(LVDS, temp); } +static void vlv_update_pll(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + intel_clock_t *clock, intel_clock_t *reduced_clock, + int refclk, int num_connectors) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 dpll, mdiv, pdiv; + u32 bestn, bestm1, bestm2, bestp1, bestp2; + bool is_hdmi; + + is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); + + bestn = clock->n; + bestm1 = clock->m1; + bestm2 = clock->m2; + bestp1 = clock->p1; + bestp2 = clock->p2; + + /* Enable DPIO clock input */ + dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | + DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; + I915_WRITE(DPLL(pipe), dpll); + POSTING_READ(DPLL(pipe)); + + mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); + mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); + mdiv |= ((bestn << DPIO_N_SHIFT)); + mdiv |= (1 << DPIO_POST_DIV_SHIFT); + mdiv |= (1 << DPIO_K_SHIFT); + mdiv |= DPIO_ENABLE_CALIBRATION; + intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); + + intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000); + + pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) | + (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | 
(1<<20) | + (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT); + intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv); + + intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051); + + dpll |= DPLL_VCO_ENABLE; + I915_WRITE(DPLL(pipe), dpll); + POSTING_READ(DPLL(pipe)); + if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) + DRM_ERROR("DPLL %d failed to lock\n", pipe); + + if (is_hdmi) { + u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode); + + if (temp > 1) + temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; + else + temp = 0; + + I915_WRITE(DPLL_MD(pipe), temp); + POSTING_READ(DPLL_MD(pipe)); + } + + intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */ +} + static void i9xx_update_pll(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -4050,6 +4283,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, if (IS_GEN2(dev)) i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors); + else if (IS_VALLEYVIEW(dev)) + vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL, + refclk, num_connectors); else i9xx_update_pll(crtc, mode, adjusted_mode, &clock, has_reduced_clock ? &reduced_clock : NULL, -- cgit v1.2.3 From 98364379e1d967b8a070070797498c3708e73eb7 Mon Sep 17 00:00:00 2001 From: Shobhit Kumar Date: Fri, 15 Jun 2012 11:55:14 -0700 Subject: drm/i915: Enable DP panel power sequencing for ValleyView VLV supports two dp panels, there are two set of panel power sequence registers which needed to be programmed based on the configured pipe. This patch add supports for the same Acked-by: Acked-by: Ben Widawsky Signed-off-by: Beeresh G Reviewed-by: Vijay Purushothaman Reviewed-by: Jesse Barnes Signed-off-by: Jesse Barnes [danvet: Drop the lone hunk and only keep the register definitions - I loathe incomplete bandaids. Also add a comment that this is for vlv.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b6f5f1040d77..122256fc0221 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3896,6 +3896,19 @@ #define PCH_LVDS 0xe1180 #define LVDS_DETECTED (1 << 1) +/* vlv has 2 sets of panel control regs. */ +#define PIPEA_PP_STATUS 0x61200 +#define PIPEA_PP_CONTROL 0x61204 +#define PIPEA_PP_ON_DELAYS 0x61208 +#define PIPEA_PP_OFF_DELAYS 0x6120c +#define PIPEA_PP_DIVISOR 0x61210 + +#define PIPEB_PP_STATUS 0x61300 +#define PIPEB_PP_CONTROL 0x61304 +#define PIPEB_PP_ON_DELAYS 0x61308 +#define PIPEB_PP_OFF_DELAYS 0x6130c +#define PIPEB_PP_DIVISOR 0x61310 + #define PCH_PP_STATUS 0xc7200 #define PCH_PP_CONTROL 0xc7204 #define PANEL_UNLOCK_REGS (0xabcd << 16) -- cgit v1.2.3 From 7d2c24e8cd997867936b41a94a9984382af1566a Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 15 Jun 2012 11:55:15 -0700 Subject: drm/i915: add ValleyView specific CRT detect function Might be able to merge this back in at some point, but we're seeing bugs with ADPA based detection, so keep it separate for now with explicit hotplug trigger usage. 
v2: drop superfluous debug message v3: comment forced detection, need to debug (Eugeni) Reviewed-by: Eugeni Dodonov Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_crt.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 5978490dac92..71b412a6fbd5 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -230,6 +230,42 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) return ret; } +static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 adpa; + bool ret; + u32 save_adpa; + + save_adpa = adpa = I915_READ(ADPA); + DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); + + adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; + + I915_WRITE(ADPA, adpa); + + if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, + 1000)) { + DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); + I915_WRITE(ADPA, save_adpa); + } + + /* Check the status to see if both blue and green are on now */ + adpa = I915_READ(ADPA); + if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) + ret = true; + else + ret = false; + + DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); + + /* FIXME: debug force function and remove */ + ret = true; + + return ret; +} + /** * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence. * @@ -249,6 +285,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) if (HAS_PCH_SPLIT(dev)) return intel_ironlake_crt_detect_hotplug(connector); + if (IS_VALLEYVIEW(dev)) + return valleyview_crt_detect_hotplug(connector); + /* * On 4 series desktop, CRT detect sequence need to be done twice * to get a reliable result. -- cgit v1.2.3 From 4a87d65d54ffc76e1d6f7e2124354997b66bd81c Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 15 Jun 2012 11:55:16 -0700 Subject: drm/i915: add HDMI and DP port enumeration on ValleyView ValleyView is similar to IbexPeak here, but with different register offsets. v2: use SDVOB instead ov VLV_HDMIB (Daniel) drop unnecessary eDP check in DP_C init (Daniel) eDP support will be coming later from Shobit. 
Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 1 - drivers/gpu/drm/i915/intel_display.c | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 122256fc0221..f075139ed9b8 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3865,7 +3865,6 @@ #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) /* or SDVOB */ -#define VLV_HDMIB 0x61140 #define HDMIB 0xe1140 #define PORT_ENABLE (1 << 31) #define TRANSCODER(pipe) ((pipe) << 30) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 7794c1a630c0..06721c0e9f98 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6768,7 +6768,24 @@ static void intel_setup_outputs(struct drm_device *dev) if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) intel_dp_init(dev, PCH_DP_D); + } else if (IS_VALLEYVIEW(dev)) { + int found; + + if (I915_READ(SDVOB) & PORT_DETECTED) { + /* SDVOB multiplex with HDMIB */ + found = intel_sdvo_init(dev, SDVOB, true); + if (!found) + intel_hdmi_init(dev, SDVOB); + if (!found && (I915_READ(DP_B) & DP_DETECTED)) + intel_dp_init(dev, DP_B); + } + + if (I915_READ(SDVOC) & PORT_DETECTED) + intel_hdmi_init(dev, SDVOC); + /* Shares lanes with HDMI on SDVOC */ + if (I915_READ(DP_C) & DP_DETECTED) + intel_dp_init(dev, DP_C); } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { bool found = false; -- cgit v1.2.3 From f7dff0c9cbb89e9c406c0ca47f32129b61721174 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 15 Jun 2012 11:55:17 -0700 Subject: drm/i915: access VLV regs through read/write switch Since the offsets have all moved around. v2: switch IS_DISPLAYREG and IS_VALLEYVIEW checks around since the latter is cheaper (Daniel) bail out early in IS_DISPLAYREG if the reg is in the new range (Daniel) Signed-off-by: Jesse Barnes [danvet: Fixup if cascading fail that broke HAS_FORCEWAKE machines.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 86 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 85 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index d7dd60bb2745..d1306c0f44f9 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1146,6 +1146,84 @@ MODULE_LICENSE("GPL and additional rights"); ((reg) != FORCEWAKE)) && \ (!IS_VALLEYVIEW((dev_priv)->dev)) +static bool IS_DISPLAYREG(u32 reg) +{ + /* + * This should make it easier to transition modules over to the + * new register block scheme, since we can do it incrementally. 
+ */ + if (reg >= 0x180000) + return false; + + if (reg >= RENDER_RING_BASE && + reg < RENDER_RING_BASE + 0xff) + return false; + if (reg >= GEN6_BSD_RING_BASE && + reg < GEN6_BSD_RING_BASE + 0xff) + return false; + if (reg >= BLT_RING_BASE && + reg < BLT_RING_BASE + 0xff) + return false; + + if (reg == PGTBL_ER) + return false; + + if (reg >= IPEIR_I965 && + reg < HWSTAM) + return false; + + if (reg == MI_MODE) + return false; + + if (reg == GFX_MODE_GEN7) + return false; + + if (reg == RENDER_HWS_PGA_GEN7 || + reg == BSD_HWS_PGA_GEN7 || + reg == BLT_HWS_PGA_GEN7) + return false; + + if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL || + reg == GEN6_BSD_RNCID) + return false; + + if (reg == GEN6_BLITTER_ECOSKPD) + return false; + + if (reg >= 0x4000c && + reg <= 0x4002c) + return false; + + if (reg >= 0x4f000 && + reg <= 0x4f08f) + return false; + + if (reg >= 0x4f100 && + reg <= 0x4f11f) + return false; + + if (reg >= VLV_MASTER_IER && + reg <= GEN6_PMIER) + return false; + + if (reg >= FENCE_REG_SANDYBRIDGE_0 && + reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8))) + return false; + + if (reg >= VLV_IIR_RW && + reg <= VLV_ISR) + return false; + + if (reg == FORCEWAKE_VLV || + reg == FORCEWAKE_ACK_VLV) + return false; + + if (reg == GEN6_GDRST) + return false; + + return true; +} + #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ u##x val = 0; \ @@ -1158,6 +1236,8 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ if (dev_priv->forcewake_count == 0) \ dev_priv->display.force_wake_put(dev_priv); \ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ + } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ + val = read##y(dev_priv->regs + reg + 0x180000); \ } else { \ val = read##y(dev_priv->regs + reg); \ } \ @@ -1178,7 +1258,11 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ - write##y(val, dev_priv->regs + reg); \ + if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ + write##y(val, dev_priv->regs + reg + 0x180000); \ + } else { \ + write##y(val, dev_priv->regs + reg); \ + } \ if (unlikely(__fifo_ret)) { \ gen6_gt_check_fifodbg(dev_priv); \ } \ -- cgit v1.2.3 From bd9e8413c9bdfc36b5b8ce6ed86843d157c17099 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 15 Jun 2012 11:55:18 -0700 Subject: drm/i915: VLV VGA port only handles on & off, like PCH VGA Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_crt.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 71b412a6fbd5..61d55d3141c7 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -88,6 +88,9 @@ static void gmch_crt_dpms(struct drm_encoder *encoder, int mode) temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); temp &= ~ADPA_DAC_ENABLE; + if (IS_VALLEYVIEW(dev) && mode != DRM_MODE_DPMS_ON) + mode = DRM_MODE_DPMS_OFF; + switch (mode) { case DRM_MODE_DPMS_ON: temp |= ADPA_DAC_ENABLE; -- cgit v1.2.3 From e87c46993e30e8fe2e7a0981a532abe8bba07e62 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 15 Jun 2012 11:55:19 -0700 Subject: agp/intel: allow cacheable and GDFT PTEs on ValleyView The PTE format is similar to SNB, but we don't support an MLC and don't need chipset flushing. 
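Concretely, the resulting entries use the gen6 layout: uncached for plain AGP_USER_MEMORY, the snoop/"LLC" bit (plus GFDT when requested) for cacheable allocations, and physical address bits 39:32 folded into entry bits 11:4. A rough sketch of that encoding (the vlv_encode_pte() helper is hypothetical; the real code builds the flags inline in valleyview_write_entry()):

	/* Sketch only: gen6-style PTE encoding as used for VLV here. */
	static u32 vlv_encode_pte(dma_addr_t addr, unsigned int flags)
	{
		unsigned int type = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
		u32 pte = I810_PTE_VALID;

		if (type == AGP_USER_MEMORY)
			pte |= GEN6_PTE_UNCACHED;
		else {
			pte |= GEN6_PTE_LLC;
			if (flags & AGP_USER_CACHED_MEMORY_GFDT)
				pte |= GEN6_PTE_GFDT;
		}

		/* gen6 has bits 11-4 for physical addr bits 39-32 */
		addr |= (addr >> 28) & 0xff0;
		return (u32)addr | pte;
	}
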
Note: I have my questions whether this is right, given that MLC died for snb & ivb, that ivb has grown a L3$ cache instead (which vlv seems to have, too) and that the LLC bit here isn't actually LLC, but just means 'snoop cpu caches'. But I plan to burn this all with the heat of a thousands suns in my gtt rework, so who cares ;-) Signed-off-by: Jesse Barnes [danvet: Added note.] Signed-off-by: Daniel Vetter --- drivers/char/agp/intel-gtt.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 419a25eeefd8..692610e597db 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1183,9 +1183,17 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry, static void valleyview_write_entry(dma_addr_t addr, unsigned int entry, unsigned int flags) { + unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; + unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; u32 pte_flags; - pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; + if (type_mask == AGP_USER_MEMORY) + pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; + else { + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } /* gen6 has bit11-4 for physical addr bit39-32 */ addr |= (addr >> 28) & 0xff0; @@ -1380,7 +1388,6 @@ static const struct intel_gtt_driver valleyview_gtt_driver = { .write_entry = valleyview_write_entry, .dma_mask_size = 40, .check_flags = gen6_check_flags, - .chipset_flush = i9xx_chipset_flush, }; /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of -- cgit v1.2.3 From e597dad846bafe6c9802c9ae2a94d755c5567de9 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 15 Jun 2012 11:55:22 -0700 Subject: agp/intel: use correct GTT offset on VLV VLV is a gen7 device, but we don't currently handle that in the switch. So add it and write the PTEs correctly. Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/char/agp/intel-gtt.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 692610e597db..9ed92ef5829b 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1253,6 +1253,7 @@ static int i9xx_setup(void) switch (INTEL_GTT_GEN) { case 5: case 6: + case 7: gtt_offset = MB(2); break; case 4: -- cgit v1.2.3 From 935536096331177ab4c6c52e74df8e58f8912c4b Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 15 Jun 2012 11:55:23 -0700 Subject: drm/i915: don't enable PPGTT on VLV yet Needs some more work and testing. 
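The main consumer is the GEM init path, which only sets up the aliasing PPGTT when both the module option and the platform flag allow it, so clearing the flag for VLV is enough to keep that path out of the picture for now. A rough sketch of the call site (simplified, not an exact quote of the init code):

	/* Sketch: how HAS_ALIASING_PPGTT() is consumed during GEM init;
	 * with this patch VLV takes the early return. */
	static int example_init_ppgtt(struct drm_device *dev)
	{
		if (!i915_enable_ppgtt || !HAS_ALIASING_PPGTT(dev))
			return 0;

		return i915_gem_init_aliasing_ppgtt(dev);
	}
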
Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 82e657eafd55..24ef5d77927f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1091,7 +1091,7 @@ struct drm_i915_file_private { #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) -#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6) +#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev)) #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) -- cgit v1.2.3 From 31acc7f59aac34477423a7dde654ae998b48d666 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 20 Jun 2012 10:53:11 -0700 Subject: drm/i915: support page flipping on ValleyView And restructure the IRQ handling a little. We can use pipestat for most things, and make sure we don't affect pipe events when enabling and disabling vblank interupts. We can leave vblank interrupts masked but enabled so we're not dependent on the first client to toggle the disable timer. We can also mask all render based interrupts, since the ring code will handle unmasking them for us. v2: roll in vblank masking, remove unneeded variable (Daniel) Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_irq.c | 97 +++++++++++++++++++---------------------- 1 file changed, 46 insertions(+), 51 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index add0aaeadecd..84975e1e1f05 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -513,15 +513,10 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) unsigned long irqflags; int pipe; u32 pipe_stats[I915_MAX_PIPES]; - u32 vblank_status; - int vblank = 0; bool blc_event; atomic_inc(&dev_priv->irq_received); - vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS | - PIPE_VBLANK_INTERRUPT_STATUS; - while (true) { iir = I915_READ(VLV_IIR); gt_iir = I915_READ(GTIIR); @@ -551,6 +546,16 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + for_each_pipe(pipe) { + if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) + drm_handle_vblank(dev, pipe); + + if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip(dev, pipe); + } + } + /* Consume port. 
Then clear IIR or we'll miss events */ if (iir & I915_DISPLAY_PORT_INTERRUPT) { u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); @@ -565,19 +570,6 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) I915_READ(PORT_HOTPLUG_STAT); } - - if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) { - drm_handle_vblank(dev, 0); - vblank++; - intel_finish_page_flip(dev, 0); - } - - if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) { - drm_handle_vblank(dev, 1); - vblank++; - intel_finish_page_flip(dev, 0); - } - if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; @@ -1479,23 +1471,20 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - u32 dpfl, imr; + u32 imr; if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - dpfl = I915_READ(VLV_DPFLIPSTAT); imr = I915_READ(VLV_IMR); - if (pipe == 0) { - dpfl |= PIPEA_VBLANK_INT_EN; + if (pipe == 0) imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; - } else { - dpfl |= PIPEA_VBLANK_INT_EN; + else imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; - } - I915_WRITE(VLV_DPFLIPSTAT, dpfl); I915_WRITE(VLV_IMR, imr); + i915_enable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_ENABLE); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; @@ -1545,20 +1534,17 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - u32 dpfl, imr; + u32 imr; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - dpfl = I915_READ(VLV_DPFLIPSTAT); + i915_disable_pipestat(dev_priv, pipe, + PIPE_START_VBLANK_INTERRUPT_ENABLE); imr = I915_READ(VLV_IMR); - if (pipe == 0) { - dpfl &= ~PIPEA_VBLANK_INT_EN; + if (pipe == 0) imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; - } else { - dpfl &= ~PIPEB_VBLANK_INT_EN; + else imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; - } I915_WRITE(VLV_IMR, imr); - I915_WRITE(VLV_DPFLIPSTAT, dpfl); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -1892,16 +1878,24 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) static int valleyview_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 render_irqs; u32 enable_mask; u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); + u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; u16 msid; enable_mask = I915_DISPLAY_PORT_INTERRUPT; - enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | + enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; - dev_priv->irq_mask = ~enable_mask; + /* + *Leave vblank interrupts masked initially. enable/disable will + * toggle them based on usage. 
+ */ + dev_priv->irq_mask = (~enable_mask) | + I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | + I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; dev_priv->pipestat[0] = 0; dev_priv->pipestat[1] = 0; @@ -1920,26 +1914,27 @@ static int valleyview_irq_postinstall(struct drm_device *dev) I915_WRITE(PIPESTAT(1), 0xffff); POSTING_READ(VLV_IER); + i915_enable_pipestat(dev_priv, 0, pipestat_enable); + i915_enable_pipestat(dev_priv, 1, pipestat_enable); + I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff); - render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | - GT_GEN6_BLT_CS_ERROR_INTERRUPT | - GT_GEN6_BLT_USER_INTERRUPT | - GT_GEN6_BSD_USER_INTERRUPT | - GT_GEN6_BSD_CS_ERROR_INTERRUPT | - GT_GEN7_L3_PARITY_ERROR_INTERRUPT | - GT_PIPE_NOTIFY | - GT_RENDER_CS_ERROR_INTERRUPT | - GT_SYNC_STATUS | - GT_USER_INTERRUPT; - - dev_priv->gt_irq_mask = ~render_irqs; + dev_priv->gt_irq_mask = ~0; I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, 0); - I915_WRITE(GTIER, render_irqs); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | + GT_GEN6_BLT_CS_ERROR_INTERRUPT | + GT_GEN6_BLT_USER_INTERRUPT | + GT_GEN6_BSD_USER_INTERRUPT | + GT_GEN6_BSD_CS_ERROR_INTERRUPT | + GT_GEN7_L3_PARITY_ERROR_INTERRUPT | + GT_PIPE_NOTIFY | + GT_RENDER_CS_ERROR_INTERRUPT | + GT_SYNC_STATUS | + GT_USER_INTERRUPT); POSTING_READ(GTIER); /* ack & enable invalid PTE error interrupts */ -- cgit v1.2.3 From 7983117f0ba2cec585f1c643297de5ea15ed9920 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 20 Jun 2012 10:53:12 -0700 Subject: drm/i915: enable display messages to GT on ValleyView Enable the on-chip messaging between the display engine and the GT. v2: use bit definitions for DPFLIPSTAT reg Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 4 ++-- drivers/gpu/drm/i915/intel_pm.c | 13 +++++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f075139ed9b8..0a61481cd2c2 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2653,13 +2653,13 @@ #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) #define VLV_DPFLIPSTAT 0x70028 -#define PIPEB_LINE_COMPARE_STATUS (1<<29) +#define PIPEB_LINE_COMPARE_INT_EN (1<<29) #define PIPEB_HLINE_INT_EN (1<<28) #define PIPEB_VBLANK_INT_EN (1<<27) #define SPRITED_FLIPDONE_INT_EN (1<<26) #define SPRITEC_FLIPDONE_INT_EN (1<<25) #define PLANEB_FLIPDONE_INT_EN (1<<24) -#define PIPEA_LINE_COMPARE_STATUS (1<<21) +#define PIPEA_LINE_COMPARE_INT_EN (1<<21) #define PIPEA_HLINE_INT_EN (1<<20) #define PIPEA_VBLANK_INT_EN (1<<19) #define SPRITEB_FLIPDONE_INT_EN (1<<18) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 17c16f026244..7504fbce05cc 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3528,6 +3528,19 @@ static void valleyview_init_clock_gating(struct drm_device *dev) I915_WRITE(CACHE_MODE_1, _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); + + /* + * On ValleyView, the GUnit needs to signal the GT + * when flip and other events complete. 
So enable + * all the GUnit->GT interrupts here + */ + I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN | + PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN | + SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN | + PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN | + PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN | + SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN | + PLANEA_FLIPDONE_INT_EN); } static void g4x_init_clock_gating(struct drm_device *dev) -- cgit v1.2.3 From ff049b6ce21d2814451afd4a116d001712e0116b Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 20 Jun 2012 10:53:13 -0700 Subject: drm/i915: bind driver to ValleyView chipsets With the code in place, we can bind the driver, should make bisect possible. Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index d1306c0f44f9..05adbf23951a 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -352,6 +352,9 @@ static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */ + INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), + INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), + INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), {0, 0, 0} }; -- cgit v1.2.3 From 876dc9f32907e57e0298bcd0f1607cb7a2582f63 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 8 May 2012 14:24:01 +0200 Subject: drm/radeon: remove radeon_fence_create MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is completely unnecessary to create fences before they are emitted, so remove it and a bunch of checks if fences are emitted or not. 
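For callers the change boils down to dropping the separate allocation step: radeon_fence_emit() now allocates the fence itself and takes the ring explicitly, so the copy/benchmark/test paths just pass the address of a fence pointer. A before/after sketch (error handling trimmed; src, dst and npages stand in for the usual copy arguments):

	/* before this patch: allocate a fence, then hand it to the copy */
	struct radeon_fence *fence;
	r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
	if (r)
		return r;
	r = radeon_copy(rdev, src, dst, npages, fence);

	/* after: emission allocates the fence, callers pass a pointer slot */
	struct radeon_fence *fence = NULL;
	r = radeon_copy(rdev, src, dst, npages, &fence);
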
Signed-off-by: Christian König Reviewed-by: Jerome Glisse --- drivers/gpu/drm/radeon/evergreen.c | 2 +- drivers/gpu/drm/radeon/ni.c | 2 +- drivers/gpu/drm/radeon/r100.c | 4 +- drivers/gpu/drm/radeon/r200.c | 4 +- drivers/gpu/drm/radeon/r600.c | 4 +- drivers/gpu/drm/radeon/r600_blit_kms.c | 6 +-- drivers/gpu/drm/radeon/radeon.h | 11 +++--- drivers/gpu/drm/radeon/radeon_asic.h | 8 ++-- drivers/gpu/drm/radeon/radeon_benchmark.c | 10 +---- drivers/gpu/drm/radeon/radeon_fence.c | 42 ++++++-------------- drivers/gpu/drm/radeon/radeon_ring.c | 19 ++++----- drivers/gpu/drm/radeon/radeon_sa.c | 2 +- drivers/gpu/drm/radeon/radeon_test.c | 66 ++++++++++++------------------- drivers/gpu/drm/radeon/radeon_ttm.c | 30 ++++++-------- drivers/gpu/drm/radeon/si.c | 6 +-- 15 files changed, 86 insertions(+), 130 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 7fb3d2e0434c..8e40ba4a9b79 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1371,7 +1371,7 @@ void evergreen_mc_program(struct radeon_device *rdev) */ void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) { - struct radeon_ring *ring = &rdev->ring[ib->fence->ring]; + struct radeon_ring *ring = &rdev->ring[ib->ring]; /* set to DX10/11 mode */ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0)); diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index b7bf18e40215..2366be3df074 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -850,7 +850,7 @@ void cayman_fence_ring_emit(struct radeon_device *rdev, void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) { - struct radeon_ring *ring = &rdev->ring[ib->fence->ring]; + struct radeon_ring *ring = &rdev->ring[ib->ring]; /* set to DX10/11 mode */ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0)); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index fb44e7e49083..415b7d8fbba2 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -883,7 +883,7 @@ int r100_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct radeon_fence *fence) + struct radeon_fence **fence) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; uint32_t cur_pages; @@ -947,7 +947,7 @@ int r100_copy_blit(struct radeon_device *rdev, RADEON_WAIT_HOST_IDLECLEAN | RADEON_WAIT_DMA_GUI_IDLE); if (fence) { - r = radeon_fence_emit(rdev, fence); + r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); } radeon_ring_unlock_commit(rdev, ring); return r; diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index a26144d01207..f0889259eb08 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -85,7 +85,7 @@ int r200_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct radeon_fence *fence) + struct radeon_fence **fence) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; uint32_t size; @@ -120,7 +120,7 @@ int r200_copy_dma(struct radeon_device *rdev, radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE); if (fence) { - r = radeon_fence_emit(rdev, fence); + r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); } radeon_ring_unlock_commit(rdev, ring); return r; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c 
index bff627293812..a80e61e138db 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2309,7 +2309,7 @@ int r600_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct radeon_fence *fence) + struct radeon_fence **fence) { struct radeon_sa_bo *vb = NULL; int r; @@ -2607,7 +2607,7 @@ void r600_fini(struct radeon_device *rdev) */ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) { - struct radeon_ring *ring = &rdev->ring[ib->fence->ring]; + struct radeon_ring *ring = &rdev->ring[ib->ring]; /* FIXME: implement */ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 03b6e0d3d503..02f4eebf805f 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -703,20 +703,20 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages, return 0; } -void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence, +void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence, struct radeon_sa_bo *vb) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; int r; - r = radeon_fence_emit(rdev, fence); + r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); if (r) { radeon_ring_unlock_undo(rdev, ring); return; } radeon_ring_unlock_commit(rdev, ring); - radeon_sa_bo_free(rdev, &vb, fence); + radeon_sa_bo_free(rdev, &vb, *fence); } void r600_kms_blit_copy(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index fefcca55c1eb..e2feddd91df5 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -113,7 +113,6 @@ extern int radeon_lockup_timeout; /* fence seq are set to this number when signaled */ #define RADEON_FENCE_SIGNALED_SEQ 0LL -#define RADEON_FENCE_NOTEMITED_SEQ (~0LL) /* internal ring indices */ /* r1xx+ has gfx CP ring */ @@ -277,8 +276,7 @@ struct radeon_fence { int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); int radeon_fence_driver_init(struct radeon_device *rdev); void radeon_fence_driver_fini(struct radeon_device *rdev); -int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring); -int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence); +int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring); void radeon_fence_process(struct radeon_device *rdev, int ring); bool radeon_fence_signaled(struct radeon_fence *fence); int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); @@ -630,6 +628,7 @@ struct radeon_ib { uint32_t length_dw; uint64_t gpu_addr; uint32_t *ptr; + int ring; struct radeon_fence *fence; unsigned vm_id; bool is_const_ib; @@ -1192,20 +1191,20 @@ struct radeon_asic { uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct radeon_fence *fence); + struct radeon_fence **fence); u32 blit_ring_index; int (*dma)(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct radeon_fence *fence); + struct radeon_fence **fence); u32 dma_ring_index; /* method used for bo copy */ int (*copy)(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct radeon_fence *fence); + struct radeon_fence **fence); /* ring used for bo copies */ u32 copy_ring_index; 
} copy; diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index e76a941ef14e..8cdf075aacfd 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -79,7 +79,7 @@ int r100_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct radeon_fence *fence); + struct radeon_fence **fence); int r100_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t tiling_flags, uint32_t pitch, uint32_t offset, uint32_t obj_size); @@ -144,7 +144,7 @@ extern int r200_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, - struct radeon_fence *fence); + struct radeon_fence **fence); void r200_set_safe_registers(struct radeon_device *rdev); /* @@ -318,7 +318,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); int r600_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, struct radeon_fence *fence); + unsigned num_gpu_pages, struct radeon_fence **fence); void r600_hpd_init(struct radeon_device *rdev); void r600_hpd_fini(struct radeon_device *rdev); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); @@ -364,7 +364,7 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); /* r600 blit */ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages, struct radeon_sa_bo **vb); -void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence, +void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence, struct radeon_sa_bo *vb); void r600_kms_blit_copy(struct radeon_device *rdev, u64 src_gpu_addr, u64 dst_gpu_addr, diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 364f5b1a04b9..bedda9caadd9 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -45,20 +45,14 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, for (i = 0; i < n; i++) { switch (flag) { case RADEON_BENCHMARK_COPY_DMA: - r = radeon_fence_create(rdev, &fence, radeon_copy_dma_ring_index(rdev)); - if (r) - return r; r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, - fence); + &fence); break; case RADEON_BENCHMARK_COPY_BLIT: - r = radeon_fence_create(rdev, &fence, radeon_copy_blit_ring_index(rdev)); - if (r) - return r; r = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, - fence); + &fence); break; default: DRM_ERROR("Unknown copy method\n"); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 11f5f402d22c..401d346a05c0 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -61,15 +61,21 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring) return seq; } -int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) +int radeon_fence_emit(struct radeon_device *rdev, + struct radeon_fence **fence, + int ring) { /* we are protected by the ring emission mutex */ - if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) { - return 0; + *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL); + if ((*fence) == NULL) { + return -ENOMEM; } - fence->seq = ++rdev->fence_drv[fence->ring].seq; - radeon_fence_ring_emit(rdev, fence->ring, fence); - 
trace_radeon_fence_emit(rdev->ddev, fence->seq); + kref_init(&((*fence)->kref)); + (*fence)->rdev = rdev; + (*fence)->seq = ++rdev->fence_drv[ring].seq; + (*fence)->ring = ring; + radeon_fence_ring_emit(rdev, ring, *fence); + trace_radeon_fence_emit(rdev->ddev, (*fence)->seq); return 0; } @@ -138,25 +144,9 @@ static void radeon_fence_destroy(struct kref *kref) struct radeon_fence *fence; fence = container_of(kref, struct radeon_fence, kref); - fence->seq = RADEON_FENCE_NOTEMITED_SEQ; kfree(fence); } -int radeon_fence_create(struct radeon_device *rdev, - struct radeon_fence **fence, - int ring) -{ - *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL); - if ((*fence) == NULL) { - return -ENOMEM; - } - kref_init(&((*fence)->kref)); - (*fence)->rdev = rdev; - (*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ; - (*fence)->ring = ring; - return 0; -} - static bool radeon_fence_seq_signaled(struct radeon_device *rdev, u64 seq, unsigned ring) { @@ -176,10 +166,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence) if (!fence) { return true; } - if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) { - WARN(1, "Querying an unemitted fence : %p !\n", fence); - return true; - } if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) { return true; } @@ -444,9 +430,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev, return 0; } - if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) { - seq[i] = fences[i]->seq; - } + seq[i] = fences[i]->seq; } r = radeon_fence_wait_any_seq(rdev, seq, intr); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 983658c91358..dd506c216d8f 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -49,13 +49,9 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, dev_err(rdev->dev, "failed to get a new IB (%d)\n", r); return r; } - r = radeon_fence_create(rdev, &ib->fence, ring); - if (r) { - dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r); - radeon_sa_bo_free(rdev, &ib->sa_bo, NULL); - return r; - } + ib->ring = ring; + ib->fence = NULL; ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo); ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); ib->vm_id = 0; @@ -74,7 +70,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) { - struct radeon_ring *ring = &rdev->ring[ib->fence->ring]; + struct radeon_ring *ring = &rdev->ring[ib->ring]; int r = 0; if (!ib->length_dw || !ring->ready) { @@ -89,8 +85,13 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); return r; } - radeon_ring_ib_execute(rdev, ib->fence->ring, ib); - radeon_fence_emit(rdev, ib->fence); + radeon_ring_ib_execute(rdev, ib->ring, ib); + r = radeon_fence_emit(rdev, &ib->fence, ib->ring); + if (r) { + dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r); + radeon_ring_unlock_undo(rdev, ring); + return r; + } radeon_ring_unlock_commit(rdev, ring); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index 32059b745728..81dbb5b946ef 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c @@ -349,7 +349,7 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo, sa_manager = (*sa_bo)->manager; spin_lock(&sa_manager->lock); - if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) { + if (fence && !radeon_fence_signaled(fence)) { (*sa_bo)->fence = 
radeon_fence_ref(fence); list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[fence->ring]); diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index efff929ea49d..47e1535f2706 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -106,13 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev) radeon_bo_kunmap(gtt_obj[i]); - r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX); - if (r) { - DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i); - goto out_cleanup; - } - - r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence); + r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); if (r) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); goto out_cleanup; @@ -155,13 +149,7 @@ void radeon_test_moves(struct radeon_device *rdev) radeon_bo_kunmap(vram_obj); - r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX); - if (r) { - DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i); - goto out_cleanup; - } - - r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence); + r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); if (r) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); goto out_cleanup; @@ -245,17 +233,6 @@ void radeon_test_ring_sync(struct radeon_device *rdev, int ridxB = radeon_ring_index(rdev, ringB); int r; - r = radeon_fence_create(rdev, &fence1, ridxA); - if (r) { - DRM_ERROR("Failed to create sync fence 1\n"); - goto out_cleanup; - } - r = radeon_fence_create(rdev, &fence2, ridxA); - if (r) { - DRM_ERROR("Failed to create sync fence 2\n"); - goto out_cleanup; - } - r = radeon_semaphore_create(rdev, &semaphore); if (r) { DRM_ERROR("Failed to create semaphore\n"); @@ -268,9 +245,19 @@ void radeon_test_ring_sync(struct radeon_device *rdev, goto out_cleanup; } radeon_semaphore_emit_wait(rdev, ridxA, semaphore); - radeon_fence_emit(rdev, fence1); + r = radeon_fence_emit(rdev, &fence1, ridxA); + if (r) { + DRM_ERROR("Failed to emit fence 1\n"); + radeon_ring_unlock_undo(rdev, ringA); + goto out_cleanup; + } radeon_semaphore_emit_wait(rdev, ridxA, semaphore); - radeon_fence_emit(rdev, fence2); + r = radeon_fence_emit(rdev, &fence2, ridxA); + if (r) { + DRM_ERROR("Failed to emit fence 2\n"); + radeon_ring_unlock_undo(rdev, ringA); + goto out_cleanup; + } radeon_ring_unlock_commit(rdev, ringA); mdelay(1000); @@ -342,17 +329,6 @@ void radeon_test_ring_sync2(struct radeon_device *rdev, bool sigA, sigB; int i, r; - r = radeon_fence_create(rdev, &fenceA, ridxA); - if (r) { - DRM_ERROR("Failed to create sync fence 1\n"); - goto out_cleanup; - } - r = radeon_fence_create(rdev, &fenceB, ridxB); - if (r) { - DRM_ERROR("Failed to create sync fence 2\n"); - goto out_cleanup; - } - r = radeon_semaphore_create(rdev, &semaphore); if (r) { DRM_ERROR("Failed to create semaphore\n"); @@ -365,7 +341,12 @@ void radeon_test_ring_sync2(struct radeon_device *rdev, goto out_cleanup; } radeon_semaphore_emit_wait(rdev, ridxA, semaphore); - radeon_fence_emit(rdev, fenceA); + r = radeon_fence_emit(rdev, &fenceA, ridxA); + if (r) { + DRM_ERROR("Failed to emit sync fence 1\n"); + radeon_ring_unlock_undo(rdev, ringA); + goto out_cleanup; + } radeon_ring_unlock_commit(rdev, ringA); r = radeon_ring_lock(rdev, ringB, 64); @@ -374,7 +355,12 @@ void radeon_test_ring_sync2(struct radeon_device *rdev, goto out_cleanup; } radeon_semaphore_emit_wait(rdev, ridxB, semaphore); - radeon_fence_emit(rdev, fenceB); + r = 
radeon_fence_emit(rdev, &fenceB, ridxB); + if (r) { + DRM_ERROR("Failed to create sync fence 2\n"); + radeon_ring_unlock_undo(rdev, ringB); + goto out_cleanup; + } radeon_ring_unlock_commit(rdev, ringB); mdelay(1000); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index c94a2257761f..2d36bdda9327 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -222,15 +222,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, { struct radeon_device *rdev; uint64_t old_start, new_start; - struct radeon_fence *fence, *old_fence; + struct radeon_fence *fence; struct radeon_semaphore *sem = NULL; - int r; + int r, ridx; rdev = radeon_get_rdev(bo->bdev); - r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev)); - if (unlikely(r)) { - return r; - } + ridx = radeon_copy_ring_index(rdev); old_start = old_mem->start << PAGE_SHIFT; new_start = new_mem->start << PAGE_SHIFT; @@ -243,7 +240,6 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); - radeon_fence_unref(&fence); return -EINVAL; } switch (new_mem->mem_type) { @@ -255,42 +251,38 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); - radeon_fence_unref(&fence); return -EINVAL; } - if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) { + if (!rdev->ring[ridx].ready) { DRM_ERROR("Trying to move memory with ring turned off.\n"); - radeon_fence_unref(&fence); return -EINVAL; } BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); /* sync other rings */ - old_fence = bo->sync_obj; - if (old_fence && old_fence->ring != fence->ring - && !radeon_fence_signaled(old_fence)) { + fence = bo->sync_obj; + if (fence && fence->ring != ridx + && !radeon_fence_signaled(fence)) { bool sync_to_ring[RADEON_NUM_RINGS] = { }; - sync_to_ring[old_fence->ring] = true; + sync_to_ring[fence->ring] = true; r = radeon_semaphore_create(rdev, &sem); if (r) { - radeon_fence_unref(&fence); return r; } - r = radeon_semaphore_sync_rings(rdev, sem, - sync_to_ring, fence->ring); + r = radeon_semaphore_sync_rings(rdev, sem, sync_to_ring, ridx); if (r) { radeon_semaphore_free(rdev, sem, NULL); - radeon_fence_unref(&fence); return r; } } + fence = NULL; r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ - fence); + &fence); /* FIXME: handle copy error */ r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, evict, no_wait_reserve, no_wait_gpu, new_mem); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index c7b61f16ecfd..8868a1fa20e5 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1762,7 +1762,7 @@ void si_fence_ring_emit(struct radeon_device *rdev, */ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) { - struct radeon_ring *ring = &rdev->ring[ib->fence->ring]; + struct radeon_ring *ring = &rdev->ring[ib->ring]; u32 header; if (ib->is_const_ib) @@ -2702,7 +2702,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) if (ib->is_const_ib) ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt); else { - switch (ib->fence->ring) { + switch (ib->ring) { case RADEON_RING_TYPE_GFX_INDEX: ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt); break; @@ -2711,7 +2711,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt); 
break; default: - dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->fence->ring); + dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring); ret = -EINVAL; break; } -- cgit v1.2.3 From 68e250b7c281dbb75ea2a892a7d4ca27f974fc91 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 10 May 2012 15:57:31 +0200 Subject: drm/radeon: add infrastructure for advanced ring synchronization v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: BUG_ON not matching rings. Signed-off-by: Christian König Reviewed-by: Jerome Glisse --- drivers/gpu/drm/radeon/radeon.h | 25 +++++++++++- drivers/gpu/drm/radeon/radeon_fence.c | 73 ++++++++++++++++++++++++++++++----- 2 files changed, 87 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index e2feddd91df5..33a72dc9af85 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -257,8 +257,8 @@ struct radeon_fence_driver { uint32_t scratch_reg; uint64_t gpu_addr; volatile uint32_t *cpu_addr; - /* seq is protected by ring emission lock */ - uint64_t seq; + /* sync_seq is protected by ring emission lock */ + uint64_t sync_seq[RADEON_NUM_RINGS]; atomic64_t last_seq; unsigned long last_activity; bool initialized; @@ -288,6 +288,27 @@ int radeon_fence_wait_any(struct radeon_device *rdev, struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); void radeon_fence_unref(struct radeon_fence **fence); unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring); +bool radeon_fence_need_sync(struct radeon_fence *fence, int ring); +void radeon_fence_note_sync(struct radeon_fence *fence, int ring); +static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a, + struct radeon_fence *b) +{ + if (!a) { + return b; + } + + if (!b) { + return a; + } + + BUG_ON(a->ring != b->ring); + + if (a->seq > b->seq) { + return a; + } else { + return b; + } +} /* * Tiling registers diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 401d346a05c0..7b55625a5e18 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -72,7 +72,7 @@ int radeon_fence_emit(struct radeon_device *rdev, } kref_init(&((*fence)->kref)); (*fence)->rdev = rdev; - (*fence)->seq = ++rdev->fence_drv[ring].seq; + (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring]; (*fence)->ring = ring; radeon_fence_ring_emit(rdev, ring, *fence); trace_radeon_fence_emit(rdev->ddev, (*fence)->seq); @@ -449,7 +449,7 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) * wait. */ seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; - if (seq >= rdev->fence_drv[ring].seq) { + if (seq >= rdev->fence_drv[ring].sync_seq[ring]) { /* nothing to wait for, last_seq is already the last emited fence */ return -ENOENT; @@ -464,7 +464,7 @@ int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) * activity can be scheduled so there won't be concurrent access * to seq value. */ - return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq, + return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].sync_seq[ring], ring, false, false); } @@ -492,7 +492,8 @@ unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring) * but it's ok to report slightly wrong fence count here. 
*/ radeon_fence_process(rdev, ring); - emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq); + emitted = rdev->fence_drv[ring].sync_seq[ring] + - atomic64_read(&rdev->fence_drv[ring].last_seq); /* to avoid 32bits warp around */ if (emitted > 0x10000000) { emitted = 0x10000000; @@ -500,6 +501,51 @@ unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring) return (unsigned)emitted; } +bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring) +{ + struct radeon_fence_driver *fdrv; + + if (!fence) { + return false; + } + + if (fence->ring == dst_ring) { + return false; + } + + /* we are protected by the ring mutex */ + fdrv = &fence->rdev->fence_drv[dst_ring]; + if (fence->seq <= fdrv->sync_seq[fence->ring]) { + return false; + } + + return true; +} + +void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring) +{ + struct radeon_fence_driver *dst, *src; + unsigned i; + + if (!fence) { + return; + } + + if (fence->ring == dst_ring) { + return; + } + + /* we are protected by the ring mutex */ + src = &fence->rdev->fence_drv[fence->ring]; + dst = &fence->rdev->fence_drv[dst_ring]; + for (i = 0; i < RADEON_NUM_RINGS; ++i) { + if (i == dst_ring) { + continue; + } + dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]); + } +} + int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) { uint64_t index; @@ -521,7 +567,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) } rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index; - radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring); + radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring); rdev->fence_drv[ring].initialized = true; dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n", ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr); @@ -530,10 +576,13 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring) { + int i; + rdev->fence_drv[ring].scratch_reg = -1; rdev->fence_drv[ring].cpu_addr = NULL; rdev->fence_drv[ring].gpu_addr = 0; - rdev->fence_drv[ring].seq = 0; + for (i = 0; i < RADEON_NUM_RINGS; ++i) + rdev->fence_drv[ring].sync_seq[i] = 0; atomic64_set(&rdev->fence_drv[ring].last_seq, 0); rdev->fence_drv[ring].last_activity = jiffies; rdev->fence_drv[ring].initialized = false; @@ -579,7 +628,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; - int i; + int i, j; for (i = 0; i < RADEON_NUM_RINGS; ++i) { if (!rdev->fence_drv[i].initialized) @@ -588,8 +637,14 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data) seq_printf(m, "--- ring %d ---\n", i); seq_printf(m, "Last signaled fence 0x%016llx\n", (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq)); - seq_printf(m, "Last emitted 0x%016llx\n", - rdev->fence_drv[i].seq); + seq_printf(m, "Last emitted 0x%016llx\n", + rdev->fence_drv[i].sync_seq[i]); + + for (j = 0; j < RADEON_NUM_RINGS; ++j) { + if (i != j && rdev->fence_drv[j].initialized) + seq_printf(m, "Last sync to ring %d 0x%016llx\n", + j, rdev->fence_drv[i].sync_seq[j]); + } } return 0; } -- cgit v1.2.3 From 220907d9835ce5181b9f782c862b1ee7a4d24c23 Mon Sep 17 00:00:00 2001 From: Christian 
König Date: Thu, 10 May 2012 16:46:43 +0200 Subject: drm/radeon: rework ring syncing code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move inter ring syncing with semaphores into the existing ring allocations, with that we need to lock the ring mutex only once. Signed-off-by: Christian König Reviewed-by: Jerome Glisse --- drivers/gpu/drm/radeon/evergreen_blit_kms.c | 3 +- drivers/gpu/drm/radeon/r600.c | 5 +- drivers/gpu/drm/radeon/r600_blit_kms.c | 24 ++++++++-- drivers/gpu/drm/radeon/radeon.h | 6 +-- drivers/gpu/drm/radeon/radeon_asic.h | 5 +- drivers/gpu/drm/radeon/radeon_cs.c | 38 ++++----------- drivers/gpu/drm/radeon/radeon_ring.c | 30 ++++++++++-- drivers/gpu/drm/radeon/radeon_semaphore.c | 71 ++++++++++------------------- drivers/gpu/drm/radeon/radeon_test.c | 6 +-- drivers/gpu/drm/radeon/radeon_ttm.c | 20 -------- 10 files changed, 92 insertions(+), 116 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index 1e96bd458cfd..e512560ffc6f 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -622,7 +622,8 @@ int evergreen_blit_init(struct radeon_device *rdev) rdev->r600_blit.primitives.draw_auto = draw_auto; rdev->r600_blit.primitives.set_default_state = set_default_state; - rdev->r600_blit.ring_size_common = 55; /* shaders + def state */ + rdev->r600_blit.ring_size_common = 8; /* sync semaphore */ + rdev->r600_blit.ring_size_common += 55; /* shaders + def state */ rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */ rdev->r600_blit.ring_size_common += 5; /* done copy */ rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */ diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index a80e61e138db..aced97bb79e0 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2311,15 +2311,16 @@ int r600_copy_blit(struct radeon_device *rdev, unsigned num_gpu_pages, struct radeon_fence **fence) { + struct radeon_semaphore *sem = NULL; struct radeon_sa_bo *vb = NULL; int r; - r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb); + r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem); if (r) { return r; } r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb); - r600_blit_done_copy(rdev, fence, vb); + r600_blit_done_copy(rdev, fence, vb, sem); return 0; } diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 02f4eebf805f..2b8d6418a30c 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -512,7 +512,8 @@ int r600_blit_init(struct radeon_device *rdev) rdev->r600_blit.primitives.draw_auto = draw_auto; rdev->r600_blit.primitives.set_default_state = set_default_state; - rdev->r600_blit.ring_size_common = 40; /* shaders + def state */ + rdev->r600_blit.ring_size_common = 8; /* sync semaphore */ + rdev->r600_blit.ring_size_common += 40; /* shaders + def state */ rdev->r600_blit.ring_size_common += 5; /* done copy */ rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */ @@ -666,7 +667,8 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages, int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages, - struct radeon_sa_bo **vb) + struct radeon_fence **fence, struct radeon_sa_bo **vb, + struct radeon_semaphore **sem) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; int r; @@ -689,22 
+691,37 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages, return r; } + r = radeon_semaphore_create(rdev, sem); + if (r) { + radeon_sa_bo_free(rdev, vb, NULL); + return r; + } + /* calculate number of loops correctly */ ring_size = num_loops * dwords_per_loop; ring_size += rdev->r600_blit.ring_size_common; r = radeon_ring_lock(rdev, ring, ring_size); if (r) { radeon_sa_bo_free(rdev, vb, NULL); + radeon_semaphore_free(rdev, sem, NULL); return r; } + if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) { + radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring, + RADEON_RING_TYPE_GFX_INDEX); + radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX); + } else { + radeon_semaphore_free(rdev, sem, NULL); + } + rdev->r600_blit.primitives.set_default_state(rdev); rdev->r600_blit.primitives.set_shaders(rdev); return 0; } void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence, - struct radeon_sa_bo *vb) + struct radeon_sa_bo *vb, struct radeon_semaphore *sem) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; int r; @@ -717,6 +734,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence radeon_ring_unlock_commit(rdev, ring); radeon_sa_bo_free(rdev, &vb, *fence); + radeon_semaphore_free(rdev, &sem, *fence); } void r600_kms_blit_copy(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 33a72dc9af85..4563e50cbdde 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -470,10 +470,9 @@ void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, struct radeon_semaphore *semaphore); int radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, - bool sync_to[RADEON_NUM_RINGS], - int dst_ring); + int signaler, int waiter); void radeon_semaphore_free(struct radeon_device *rdev, - struct radeon_semaphore *semaphore, + struct radeon_semaphore **semaphore, struct radeon_fence *fence); /* @@ -653,6 +652,7 @@ struct radeon_ib { struct radeon_fence *fence; unsigned vm_id; bool is_const_ib; + struct radeon_fence *sync_to[RADEON_NUM_RINGS]; struct radeon_semaphore *semaphore; }; diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 8cdf075aacfd..94c427ab0f5c 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -363,9 +363,10 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); /* r600 blit */ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages, - struct radeon_sa_bo **vb); + struct radeon_fence **fence, struct radeon_sa_bo **vb, + struct radeon_semaphore **sem); void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence, - struct radeon_sa_bo *vb); + struct radeon_sa_bo *vb, struct radeon_semaphore *sem); void r600_kms_blit_copy(struct radeon_device *rdev, u64 src_gpu_addr, u64 dst_gpu_addr, unsigned num_gpu_pages, diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 142f89462aa4..dd3e234294e4 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -115,36 +115,20 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority return 0; } -static int radeon_cs_sync_rings(struct radeon_cs_parser *p) +static void radeon_cs_sync_rings(struct radeon_cs_parser *p) { 
- bool sync_to_ring[RADEON_NUM_RINGS] = { }; - bool need_sync = false; - int i, r; + int i; for (i = 0; i < p->nrelocs; i++) { - struct radeon_fence *fence; + struct radeon_fence *a, *b; if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj) continue; - fence = p->relocs[i].robj->tbo.sync_obj; - if (fence->ring != p->ring && !radeon_fence_signaled(fence)) { - sync_to_ring[fence->ring] = true; - need_sync = true; - } - } - - if (!need_sync) { - return 0; - } - - r = radeon_semaphore_create(p->rdev, &p->ib.semaphore); - if (r) { - return r; + a = p->relocs[i].robj->tbo.sync_obj; + b = p->ib.sync_to[a->ring]; + p->ib.sync_to[a->ring] = radeon_fence_later(a, b); } - - return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore, - sync_to_ring, p->ring); } /* XXX: note that this is called from the legacy UMS CS ioctl as well */ @@ -368,10 +352,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, DRM_ERROR("Invalid command stream !\n"); return r; } - r = radeon_cs_sync_rings(parser); - if (r) { - DRM_ERROR("Failed to synchronize rings !\n"); - } + radeon_cs_sync_rings(parser); parser->ib.vm_id = 0; r = radeon_ib_schedule(rdev, &parser->ib); if (r) { @@ -468,10 +449,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, if (r) { goto out; } - r = radeon_cs_sync_rings(parser); - if (r) { - DRM_ERROR("Failed to synchronize rings !\n"); - } + radeon_cs_sync_rings(parser); if ((rdev->family >= CHIP_TAHITI) && (parser->chunk_const_ib_idx != -1)) { diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index dd506c216d8f..0826e77f99ae 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -42,7 +42,7 @@ int radeon_debugfs_sa_init(struct radeon_device *rdev); int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib *ib, unsigned size) { - int r; + int i, r; r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true); if (r) { @@ -50,20 +50,26 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, return r; } + r = radeon_semaphore_create(rdev, &ib->semaphore); + if (r) { + return r; + } + ib->ring = ring; ib->fence = NULL; ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo); ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); ib->vm_id = 0; ib->is_const_ib = false; - ib->semaphore = NULL; + for (i = 0; i < RADEON_NUM_RINGS; ++i) + ib->sync_to[i] = NULL; return 0; } void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) { - radeon_semaphore_free(rdev, ib->semaphore, ib->fence); + radeon_semaphore_free(rdev, &ib->semaphore, ib->fence); radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence); radeon_fence_unref(&ib->fence); } @@ -71,7 +77,8 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) { struct radeon_ring *ring = &rdev->ring[ib->ring]; - int r = 0; + bool need_sync = false; + int i, r = 0; if (!ib->length_dw || !ring->ready) { /* TODO: Nothings in the ib we should report. 
*/ @@ -80,11 +87,24 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) } /* 64 dwords should be enough for fence too */ - r = radeon_ring_lock(rdev, ring, 64); + r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8); if (r) { dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); return r; } + for (i = 0; i < RADEON_NUM_RINGS; ++i) { + struct radeon_fence *fence = ib->sync_to[i]; + if (radeon_fence_need_sync(fence, ib->ring)) { + need_sync = true; + radeon_semaphore_sync_rings(rdev, ib->semaphore, + fence->ring, ib->ring); + radeon_fence_note_sync(fence, ib->ring); + } + } + /* immediately free semaphore when we don't need to sync */ + if (!need_sync) { + radeon_semaphore_free(rdev, &ib->semaphore, NULL); + } radeon_ring_ib_execute(rdev, ib->ring, ib); r = radeon_fence_emit(rdev, &ib->fence, ib->ring); if (r) { diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index e2ace5dce117..7cc78de6ddc3 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -68,70 +68,49 @@ void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true); } +/* caller must hold ring lock */ int radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, - bool sync_to[RADEON_NUM_RINGS], - int dst_ring) + int signaler, int waiter) { - int i = 0, r; + int r; - mutex_lock(&rdev->ring_lock); - r = radeon_ring_alloc(rdev, &rdev->ring[dst_ring], RADEON_NUM_RINGS * 8); - if (r) { - goto error; + /* no need to signal and wait on the same ring */ + if (signaler == waiter) { + return 0; } - for (i = 0; i < RADEON_NUM_RINGS; ++i) { - /* no need to sync to our own or unused rings */ - if (!sync_to[i] || i == dst_ring) - continue; - - /* prevent GPU deadlocks */ - if (!rdev->ring[i].ready) { - dev_err(rdev->dev, "Trying to sync to a disabled ring!"); - r = -EINVAL; - goto error; - } - - r = radeon_ring_alloc(rdev, &rdev->ring[i], 8); - if (r) { - goto error; - } - - radeon_semaphore_emit_signal(rdev, i, semaphore); - radeon_semaphore_emit_wait(rdev, dst_ring, semaphore); + /* prevent GPU deadlocks */ + if (!rdev->ring[signaler].ready) { + dev_err(rdev->dev, "Trying to sync to a disabled ring!"); + return -EINVAL; + } - radeon_ring_commit(rdev, &rdev->ring[i]); + r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8); + if (r) { + return r; } + radeon_semaphore_emit_signal(rdev, signaler, semaphore); + radeon_ring_commit(rdev, &rdev->ring[signaler]); - radeon_ring_commit(rdev, &rdev->ring[dst_ring]); - mutex_unlock(&rdev->ring_lock); + /* we assume caller has already allocated space on waiters ring */ + radeon_semaphore_emit_wait(rdev, waiter, semaphore); return 0; - -error: - /* unlock all locks taken so far */ - for (--i; i >= 0; --i) { - if (sync_to[i] || i == dst_ring) { - radeon_ring_undo(&rdev->ring[i]); - } - } - radeon_ring_undo(&rdev->ring[dst_ring]); - mutex_unlock(&rdev->ring_lock); - return r; } void radeon_semaphore_free(struct radeon_device *rdev, - struct radeon_semaphore *semaphore, + struct radeon_semaphore **semaphore, struct radeon_fence *fence) { - if (semaphore == NULL) { + if (semaphore == NULL || *semaphore == NULL) { return; } - if (semaphore->waiters > 0) { + if ((*semaphore)->waiters > 0) { dev_err(rdev->dev, "semaphore %p has more waiters than signalers," - " hardware lockup imminent!\n", semaphore); + " hardware lockup imminent!\n", *semaphore); } - 
radeon_sa_bo_free(rdev, &semaphore->sa_bo, fence); - kfree(semaphore); + radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence); + kfree(*semaphore); + *semaphore = NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 47e1535f2706..a94f66fb3b13 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -303,8 +303,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, } out_cleanup: - if (semaphore) - radeon_semaphore_free(rdev, semaphore, NULL); + radeon_semaphore_free(rdev, &semaphore, NULL); if (fence1) radeon_fence_unref(&fence1); @@ -422,8 +421,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev, } out_cleanup: - if (semaphore) - radeon_semaphore_free(rdev, semaphore, NULL); + radeon_semaphore_free(rdev, &semaphore, NULL); if (fenceA) radeon_fence_unref(&fenceA); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 2d36bdda9327..c43035c5cc3e 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -223,7 +223,6 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, struct radeon_device *rdev; uint64_t old_start, new_start; struct radeon_fence *fence; - struct radeon_semaphore *sem = NULL; int r, ridx; rdev = radeon_get_rdev(bo->bdev); @@ -262,31 +261,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, /* sync other rings */ fence = bo->sync_obj; - if (fence && fence->ring != ridx - && !radeon_fence_signaled(fence)) { - bool sync_to_ring[RADEON_NUM_RINGS] = { }; - sync_to_ring[fence->ring] = true; - - r = radeon_semaphore_create(rdev, &sem); - if (r) { - return r; - } - - r = radeon_semaphore_sync_rings(rdev, sem, sync_to_ring, ridx); - if (r) { - radeon_semaphore_free(rdev, sem, NULL); - return r; - } - } - - fence = NULL; r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ &fence); /* FIXME: handle copy error */ r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, evict, no_wait_reserve, no_wait_gpu, new_mem); - radeon_semaphore_free(rdev, sem, fence); radeon_fence_unref(&fence); return r; } -- cgit v1.2.3 From db7fce3983ad9b3deebda450121af4aaf6809ce2 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 11 May 2012 14:57:18 +0200 Subject: drm/radeon: replace vmram_mutex with mclk_lock v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is a rw_semaphore now and only write locked while changing the clock. Also the lock is renamed to better reflect what it is protecting. 
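As an illustration of the locking pattern this change adopts, here is a minimal sketch using the generic kernel rw_semaphore API; it is only a sketch, the two helper functions are hypothetical and not taken from the patch, and the standalone semaphore stands in for the new rdev->pm.mclk_lock. Readers hold the lock around VRAM work (buffer create/destroy, page faults) and may run concurrently; the writer holds it exclusively only while the memory clock is reprogrammed.

#include <linux/rwsem.h>

static DECLARE_RWSEM(mclk_lock);	/* stands in for rdev->pm.mclk_lock */

static void example_vram_access(void)	/* hypothetical helper */
{
	down_read(&mclk_lock);		/* concurrent VRAM users are allowed */
	/* ... create/destroy a BO or service a TTM fault ... */
	up_read(&mclk_lock);
}

static void example_set_memory_clock(void)	/* hypothetical helper */
{
	down_write(&mclk_lock);		/* exclusive: no VRAM access while mclk changes */
	/* ... reprogram the memory clock ... */
	up_write(&mclk_lock);
}
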
v2: Keep the ttm_vm_ops on IGPs Signed-off-by: Christian König Reviewed-by: Jerome Glisse --- drivers/gpu/drm/radeon/radeon.h | 3 ++- drivers/gpu/drm/radeon/radeon_device.c | 2 +- drivers/gpu/drm/radeon/radeon_object.c | 8 ++++---- drivers/gpu/drm/radeon/radeon_pm.c | 4 ++-- drivers/gpu/drm/radeon/radeon_ttm.c | 4 ++-- 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 4563e50cbdde..586f30e2cec8 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1059,6 +1059,8 @@ struct radeon_power_state { struct radeon_pm { struct mutex mutex; + /* write locked while reprogramming mclk */ + struct rw_semaphore mclk_lock; u32 active_crtcs; int active_crtc_count; int req_vblank; @@ -1554,7 +1556,6 @@ struct radeon_device { struct work_struct audio_work; int num_crtc; /* number of crtcs */ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ - struct mutex vram_mutex; bool audio_enabled; struct r600_audio audio_status; /* audio stuff */ struct notifier_block acpi_nb; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 066c98b888a5..7667184ab0bf 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -735,7 +735,7 @@ int radeon_device_init(struct radeon_device *rdev, spin_lock_init(&rdev->ih.lock); mutex_init(&rdev->gem.mutex); mutex_init(&rdev->pm.mutex); - mutex_init(&rdev->vram_mutex); + init_rwsem(&rdev->pm.mclk_lock); init_waitqueue_head(&rdev->irq.vblank_queue); init_waitqueue_head(&rdev->irq.idle_queue); r = radeon_gem_init(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 830f1a7b486f..6ecb2006e27e 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -154,11 +154,11 @@ retry: INIT_LIST_HEAD(&bo->va); radeon_ttm_placement_from_domain(bo, domain); /* Kernel allocation are uninterruptible */ - mutex_lock(&rdev->vram_mutex); + down_read(&rdev->pm.mclk_lock); r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, 0, !kernel, NULL, acc_size, sg, &radeon_ttm_bo_destroy); - mutex_unlock(&rdev->vram_mutex); + up_read(&rdev->pm.mclk_lock); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) { if (domain == RADEON_GEM_DOMAIN_VRAM) { @@ -219,9 +219,9 @@ void radeon_bo_unref(struct radeon_bo **bo) return; rdev = (*bo)->rdev; tbo = &((*bo)->tbo); - mutex_lock(&rdev->vram_mutex); + down_read(&rdev->pm.mclk_lock); ttm_bo_unref(&tbo); - mutex_unlock(&rdev->vram_mutex); + up_read(&rdev->pm.mclk_lock); if (tbo == NULL) *bo = NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 5b37e283ec38..9dc0b54fe3e3 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -251,7 +251,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) return; mutex_lock(&rdev->ddev->struct_mutex); - mutex_lock(&rdev->vram_mutex); + down_write(&rdev->pm.mclk_lock); mutex_lock(&rdev->ring_lock); /* gui idle int has issues on older chips it seems */ @@ -303,7 +303,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; mutex_unlock(&rdev->ring_lock); - mutex_unlock(&rdev->vram_mutex); + up_write(&rdev->pm.mclk_lock); mutex_unlock(&rdev->ddev->struct_mutex); } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 
c43035c5cc3e..0881131a0388 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -797,9 +797,9 @@ static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_NOPAGE; } rdev = radeon_get_rdev(bo->bdev); - mutex_lock(&rdev->vram_mutex); + down_read(&rdev->pm.mclk_lock); r = ttm_vm_ops->fault(vma, vmf); - mutex_unlock(&rdev->vram_mutex); + up_read(&rdev->pm.mclk_lock); return r; } -- cgit v1.2.3 From 6823d74003abedd688a3f535aefe6ce0e06444fd Mon Sep 17 00:00:00 2001 From: Christian Koenig Date: Wed, 16 May 2012 20:24:36 +0200 Subject: drm/radeon: remove some unneeded structure members Signed-off-by: Christian Koenig Reviewed-by: Jerome Glisse --- drivers/gpu/drm/radeon/radeon.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 586f30e2cec8..9d6c1de93591 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -738,9 +738,7 @@ struct r600_ih { struct radeon_bo *ring_obj; volatile uint32_t *ring; unsigned rptr; - unsigned rptr_offs; unsigned wptr; - unsigned wptr_old; unsigned ring_size; uint64_t gpu_addr; uint32_t ptr_mask; -- cgit v1.2.3 From c20dc3698dc7ecf053e2bf77299ae5982c0c2c45 Mon Sep 17 00:00:00 2001 From: Christian Koenig Date: Wed, 16 May 2012 21:45:24 +0200 Subject: drm/radeon: fix & improve ih ring handling v3 The spinlock was actually there to protect the rptr, but rptr was read outside of the locked area. Also we don't really need a spinlock here, an atomic should to quite fine since we only need to prevent it from being reentrant. v2: Keep the spinlock.... v3: Back to an atomic again after finding & fixing the real bug. Signed-off-by: Christian Koenig --- drivers/gpu/drm/radeon/evergreen.c | 26 +++++++++++++------------- drivers/gpu/drm/radeon/r600.c | 29 +++++++++++++---------------- drivers/gpu/drm/radeon/radeon.h | 3 +-- drivers/gpu/drm/radeon/radeon_device.c | 3 +-- drivers/gpu/drm/radeon/si.c | 27 +++++++++++++-------------- 5 files changed, 41 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 8e40ba4a9b79..765bd1f2e1ce 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2676,7 +2676,6 @@ int evergreen_irq_process(struct radeon_device *rdev) u32 rptr; u32 src_id, src_data; u32 ring_index; - unsigned long flags; bool queue_hotplug = false; bool queue_hdmi = false; @@ -2684,22 +2683,21 @@ int evergreen_irq_process(struct radeon_device *rdev) return IRQ_NONE; wptr = evergreen_get_ih_wptr(rdev); + +restart_ih: + /* is somebody else already processing irqs? */ + if (atomic_xchg(&rdev->ih.lock, 1)) + return IRQ_NONE; + rptr = rdev->ih.rptr; DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); - spin_lock_irqsave(&rdev->ih.lock, flags); - if (rptr == wptr) { - spin_unlock_irqrestore(&rdev->ih.lock, flags); - return IRQ_NONE; - } -restart_ih: /* Order reading of wptr vs. reading of IH ring data */ rmb(); /* display interrupts */ evergreen_irq_ack(rdev); - rdev->ih.wptr = wptr; while (rptr != wptr) { /* wptr/rptr are in bytes! 
*/ ring_index = rptr / 4; @@ -2998,17 +2996,19 @@ restart_ih: rptr += 16; rptr &= rdev->ih.ptr_mask; } - /* make sure wptr hasn't changed while processing */ - wptr = evergreen_get_ih_wptr(rdev); - if (wptr != rdev->ih.wptr) - goto restart_ih; if (queue_hotplug) schedule_work(&rdev->hotplug_work); if (queue_hdmi) schedule_work(&rdev->audio_work); rdev->ih.rptr = rptr; WREG32(IH_RB_RPTR, rdev->ih.rptr); - spin_unlock_irqrestore(&rdev->ih.lock, flags); + atomic_set(&rdev->ih.lock, 0); + + /* make sure wptr hasn't changed while processing */ + wptr = evergreen_get_ih_wptr(rdev); + if (wptr != rptr) + goto restart_ih; + return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index aced97bb79e0..b487c69a486d 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2858,7 +2858,6 @@ void r600_disable_interrupts(struct radeon_device *rdev) WREG32(IH_RB_RPTR, 0); WREG32(IH_RB_WPTR, 0); rdev->ih.enabled = false; - rdev->ih.wptr = 0; rdev->ih.rptr = 0; } @@ -3310,7 +3309,6 @@ int r600_irq_process(struct radeon_device *rdev) u32 rptr; u32 src_id, src_data; u32 ring_index; - unsigned long flags; bool queue_hotplug = false; bool queue_hdmi = false; @@ -3322,24 +3320,21 @@ int r600_irq_process(struct radeon_device *rdev) RREG32(IH_RB_WPTR); wptr = r600_get_ih_wptr(rdev); - rptr = rdev->ih.rptr; - DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); - - spin_lock_irqsave(&rdev->ih.lock, flags); - if (rptr == wptr) { - spin_unlock_irqrestore(&rdev->ih.lock, flags); +restart_ih: + /* is somebody else already processing irqs? */ + if (atomic_xchg(&rdev->ih.lock, 1)) return IRQ_NONE; - } -restart_ih: + rptr = rdev->ih.rptr; + DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); + /* Order reading of wptr vs. reading of IH ring data */ rmb(); /* display interrupts */ r600_irq_ack(rdev); - rdev->ih.wptr = wptr; while (rptr != wptr) { /* wptr/rptr are in bytes! 
*/ ring_index = rptr / 4; @@ -3493,17 +3488,19 @@ restart_ih: rptr += 16; rptr &= rdev->ih.ptr_mask; } - /* make sure wptr hasn't changed while processing */ - wptr = r600_get_ih_wptr(rdev); - if (wptr != rdev->ih.wptr) - goto restart_ih; if (queue_hotplug) schedule_work(&rdev->hotplug_work); if (queue_hdmi) schedule_work(&rdev->audio_work); rdev->ih.rptr = rptr; WREG32(IH_RB_RPTR, rdev->ih.rptr); - spin_unlock_irqrestore(&rdev->ih.lock, flags); + atomic_set(&rdev->ih.lock, 0); + + /* make sure wptr hasn't changed while processing */ + wptr = r600_get_ih_wptr(rdev); + if (wptr != rptr) + goto restart_ih; + return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 9d6c1de93591..5ddf895a5926 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -738,11 +738,10 @@ struct r600_ih { struct radeon_bo *ring_obj; volatile uint32_t *ring; unsigned rptr; - unsigned wptr; unsigned ring_size; uint64_t gpu_addr; uint32_t ptr_mask; - spinlock_t lock; + atomic_t lock; bool enabled; }; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 7667184ab0bf..3c563d1f9671 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -731,8 +731,7 @@ int radeon_device_init(struct radeon_device *rdev, radeon_mutex_init(&rdev->cs_mutex); mutex_init(&rdev->ring_lock); mutex_init(&rdev->dc_hw_i2c_mutex); - if (rdev->family >= CHIP_R600) - spin_lock_init(&rdev->ih.lock); + atomic_set(&rdev->ih.lock, 0); mutex_init(&rdev->gem.mutex); mutex_init(&rdev->pm.mutex); init_rwsem(&rdev->pm.mclk_lock); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 8868a1fa20e5..ecef972050d1 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2942,7 +2942,6 @@ static void si_disable_interrupts(struct radeon_device *rdev) WREG32(IH_RB_RPTR, 0); WREG32(IH_RB_WPTR, 0); rdev->ih.enabled = false; - rdev->ih.wptr = 0; rdev->ih.rptr = 0; } @@ -3359,29 +3358,27 @@ int si_irq_process(struct radeon_device *rdev) u32 rptr; u32 src_id, src_data, ring_id; u32 ring_index; - unsigned long flags; bool queue_hotplug = false; if (!rdev->ih.enabled || rdev->shutdown) return IRQ_NONE; wptr = si_get_ih_wptr(rdev); + +restart_ih: + /* is somebody else already processing irqs? */ + if (atomic_xchg(&rdev->ih.lock, 1)) + return IRQ_NONE; + rptr = rdev->ih.rptr; DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr); - spin_lock_irqsave(&rdev->ih.lock, flags); - if (rptr == wptr) { - spin_unlock_irqrestore(&rdev->ih.lock, flags); - return IRQ_NONE; - } -restart_ih: /* Order reading of wptr vs. reading of IH ring data */ rmb(); /* display interrupts */ si_irq_ack(rdev); - rdev->ih.wptr = wptr; while (rptr != wptr) { /* wptr/rptr are in bytes! 
*/ ring_index = rptr / 4; @@ -3632,15 +3629,17 @@ restart_ih: rptr += 16; rptr &= rdev->ih.ptr_mask; } - /* make sure wptr hasn't changed while processing */ - wptr = si_get_ih_wptr(rdev); - if (wptr != rdev->ih.wptr) - goto restart_ih; if (queue_hotplug) schedule_work(&rdev->hotplug_work); rdev->ih.rptr = rptr; WREG32(IH_RB_RPTR, rdev->ih.rptr); - spin_unlock_irqrestore(&rdev->ih.lock, flags); + atomic_set(&rdev->ih.lock, 0); + + /* make sure wptr hasn't changed while processing */ + wptr = si_get_ih_wptr(rdev); + if (wptr != rptr) + goto restart_ih; + return IRQ_HANDLED; } -- cgit v1.2.3 From fb98257a9d9d2089972b18079d5bdd4412e107e2 Mon Sep 17 00:00:00 2001 From: Christian Koenig Date: Thu, 17 May 2012 01:33:30 +0200 Subject: drm/radeon: apply Murphy's law to the kms irq code v3 1. It is really dangerous to have more than one spinlock protecting the same information. 2. radeon_irq_set sometimes wasn't called with lock protection, so it can happen that more than one CPU would tamper with the irq regs at the same time. 3. The pm.gui_idle variable was assuming that the 3D engine wasn't becoming idle between testing the register and setting the variable. So just remove it and test the register directly. v2: Also handle the hpd irq code the same way. v3: Rename hpd parameter for clarification. Signed-off-by: Christian Koenig Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/evergreen.c | 21 +++----- drivers/gpu/drm/radeon/r100.c | 29 +++------- drivers/gpu/drm/radeon/r600.c | 38 ++++--------- drivers/gpu/drm/radeon/r600_hdmi.c | 6 +-- drivers/gpu/drm/radeon/radeon.h | 35 ++++++------ drivers/gpu/drm/radeon/radeon_irq_kms.c | 96 ++++++++++++++++++++++++++++----- drivers/gpu/drm/radeon/radeon_kms.c | 12 +++-- drivers/gpu/drm/radeon/radeon_pm.c | 12 +---- drivers/gpu/drm/radeon/rs600.c | 13 +++-- drivers/gpu/drm/radeon/si.c | 1 - 10 files changed, 144 insertions(+), 119 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 765bd1f2e1ce..bdc1f30d7474 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -428,6 +428,7 @@ void evergreen_hpd_init(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; struct drm_connector *connector; + unsigned enabled = 0; u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN; @@ -436,73 +437,64 @@ void evergreen_hpd_init(struct radeon_device *rdev) switch (radeon_connector->hpd.hpd) { case RADEON_HPD_1: WREG32(DC_HPD1_CONTROL, tmp); - rdev->irq.hpd[0] = true; break; case RADEON_HPD_2: WREG32(DC_HPD2_CONTROL, tmp); - rdev->irq.hpd[1] = true; break; case RADEON_HPD_3: WREG32(DC_HPD3_CONTROL, tmp); - rdev->irq.hpd[2] = true; break; case RADEON_HPD_4: WREG32(DC_HPD4_CONTROL, tmp); - rdev->irq.hpd[3] = true; break; case RADEON_HPD_5: WREG32(DC_HPD5_CONTROL, tmp); - rdev->irq.hpd[4] = true; break; case RADEON_HPD_6: WREG32(DC_HPD6_CONTROL, tmp); - rdev->irq.hpd[5] = true; break; default: break; } radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); + enabled |= 1 << radeon_connector->hpd.hpd; } - if (rdev->irq.installed) - evergreen_irq_set(rdev); + radeon_irq_kms_enable_hpd(rdev, enabled); } void evergreen_hpd_fini(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; struct drm_connector *connector; + unsigned disabled = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); switch (radeon_connector->hpd.hpd) { case 
RADEON_HPD_1: WREG32(DC_HPD1_CONTROL, 0); - rdev->irq.hpd[0] = false; break; case RADEON_HPD_2: WREG32(DC_HPD2_CONTROL, 0); - rdev->irq.hpd[1] = false; break; case RADEON_HPD_3: WREG32(DC_HPD3_CONTROL, 0); - rdev->irq.hpd[2] = false; break; case RADEON_HPD_4: WREG32(DC_HPD4_CONTROL, 0); - rdev->irq.hpd[3] = false; break; case RADEON_HPD_5: WREG32(DC_HPD5_CONTROL, 0); - rdev->irq.hpd[4] = false; break; case RADEON_HPD_6: WREG32(DC_HPD6_CONTROL, 0); - rdev->irq.hpd[5] = false; break; default: break; } + disabled |= 1 << radeon_connector->hpd.hpd; } + radeon_irq_kms_disable_hpd(rdev, disabled); } /* watermark setup */ @@ -2984,7 +2976,6 @@ restart_ih: break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: GUI idle\n"); - rdev->pm.gui_idle = true; wake_up(&rdev->irq.idle_queue); break; default: diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 415b7d8fbba2..e8fe4ae3bc23 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -567,43 +567,27 @@ void r100_hpd_init(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; struct drm_connector *connector; + unsigned enable = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); - switch (radeon_connector->hpd.hpd) { - case RADEON_HPD_1: - rdev->irq.hpd[0] = true; - break; - case RADEON_HPD_2: - rdev->irq.hpd[1] = true; - break; - default: - break; - } + enable |= 1 << radeon_connector->hpd.hpd; radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); } - if (rdev->irq.installed) - r100_irq_set(rdev); + radeon_irq_kms_enable_hpd(rdev, enable); } void r100_hpd_fini(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; struct drm_connector *connector; + unsigned disable = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); - switch (radeon_connector->hpd.hpd) { - case RADEON_HPD_1: - rdev->irq.hpd[0] = false; - break; - case RADEON_HPD_2: - rdev->irq.hpd[1] = false; - break; - default: - break; - } + disable |= 1 << radeon_connector->hpd.hpd; } + radeon_irq_kms_disable_hpd(rdev, disable); } /* @@ -782,7 +766,6 @@ int r100_irq_process(struct radeon_device *rdev) /* gui idle interrupt */ if (status & RADEON_GUI_IDLE_STAT) { rdev->irq.gui_idle_acked = true; - rdev->pm.gui_idle = true; wake_up(&rdev->irq.idle_queue); } /* Vertical blank interrupts */ diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index b487c69a486d..9cd77286542c 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -709,6 +709,7 @@ void r600_hpd_init(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; struct drm_connector *connector; + unsigned enable = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -729,28 +730,22 @@ void r600_hpd_init(struct radeon_device *rdev) switch (radeon_connector->hpd.hpd) { case RADEON_HPD_1: WREG32(DC_HPD1_CONTROL, tmp); - rdev->irq.hpd[0] = true; break; case RADEON_HPD_2: WREG32(DC_HPD2_CONTROL, tmp); - rdev->irq.hpd[1] = true; break; case RADEON_HPD_3: WREG32(DC_HPD3_CONTROL, tmp); - rdev->irq.hpd[2] = true; break; case RADEON_HPD_4: WREG32(DC_HPD4_CONTROL, tmp); - rdev->irq.hpd[3] = true; break; /* DCE 3.2 */ case RADEON_HPD_5: WREG32(DC_HPD5_CONTROL, tmp); - rdev->irq.hpd[4] = true; break; case RADEON_HPD_6: 
WREG32(DC_HPD6_CONTROL, tmp); - rdev->irq.hpd[5] = true; break; default: break; @@ -759,85 +754,73 @@ void r600_hpd_init(struct radeon_device *rdev) switch (radeon_connector->hpd.hpd) { case RADEON_HPD_1: WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); - rdev->irq.hpd[0] = true; break; case RADEON_HPD_2: WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN); - rdev->irq.hpd[1] = true; break; case RADEON_HPD_3: WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN); - rdev->irq.hpd[2] = true; break; default: break; } } + enable |= 1 << radeon_connector->hpd.hpd; radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); } - if (rdev->irq.installed) - r600_irq_set(rdev); + radeon_irq_kms_enable_hpd(rdev, enable); } void r600_hpd_fini(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; struct drm_connector *connector; + unsigned disable = 0; - if (ASIC_IS_DCE3(rdev)) { - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + if (ASIC_IS_DCE3(rdev)) { switch (radeon_connector->hpd.hpd) { case RADEON_HPD_1: WREG32(DC_HPD1_CONTROL, 0); - rdev->irq.hpd[0] = false; break; case RADEON_HPD_2: WREG32(DC_HPD2_CONTROL, 0); - rdev->irq.hpd[1] = false; break; case RADEON_HPD_3: WREG32(DC_HPD3_CONTROL, 0); - rdev->irq.hpd[2] = false; break; case RADEON_HPD_4: WREG32(DC_HPD4_CONTROL, 0); - rdev->irq.hpd[3] = false; break; /* DCE 3.2 */ case RADEON_HPD_5: WREG32(DC_HPD5_CONTROL, 0); - rdev->irq.hpd[4] = false; break; case RADEON_HPD_6: WREG32(DC_HPD6_CONTROL, 0); - rdev->irq.hpd[5] = false; break; default: break; } - } - } else { - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); + } else { switch (radeon_connector->hpd.hpd) { case RADEON_HPD_1: WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0); - rdev->irq.hpd[0] = false; break; case RADEON_HPD_2: WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0); - rdev->irq.hpd[1] = false; break; case RADEON_HPD_3: WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0); - rdev->irq.hpd[2] = false; break; default: break; } } + disable |= 1 << radeon_connector->hpd.hpd; } + radeon_irq_kms_disable_hpd(rdev, disable); } /* @@ -3476,7 +3459,6 @@ restart_ih: break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: GUI idle\n"); - rdev->pm.gui_idle = true; wake_up(&rdev->irq.idle_queue); break; default: diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 82a0a4c919c0..e3558c3ef24a 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -519,8 +519,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder) if (rdev->irq.installed) { /* if irq is available use it */ - rdev->irq.afmt[dig->afmt->id] = true; - radeon_irq_set(rdev); + radeon_irq_kms_enable_afmt(rdev, dig->afmt->id); } dig->afmt->enabled = true; @@ -556,8 +555,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder) offset, radeon_encoder->encoder_id); /* disable irq */ - rdev->irq.afmt[dig->afmt->id] = false; - radeon_irq_set(rdev); + radeon_irq_kms_disable_afmt(rdev, dig->afmt->id); /* Older chipsets not handled by AtomBIOS */ if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5ddf895a5926..353a1830514a 100644 --- 
a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -615,21 +615,20 @@ union radeon_irq_stat_regs { #define RADEON_MAX_AFMT_BLOCKS 6 struct radeon_irq { - bool installed; - bool sw_int[RADEON_NUM_RINGS]; - bool crtc_vblank_int[RADEON_MAX_CRTCS]; - bool pflip[RADEON_MAX_CRTCS]; - wait_queue_head_t vblank_queue; - bool hpd[RADEON_MAX_HPD_PINS]; - bool gui_idle; - bool gui_idle_acked; - wait_queue_head_t idle_queue; - bool afmt[RADEON_MAX_AFMT_BLOCKS]; - spinlock_t sw_lock; - int sw_refcount[RADEON_NUM_RINGS]; - union radeon_irq_stat_regs stat_regs; - spinlock_t pflip_lock[RADEON_MAX_CRTCS]; - int pflip_refcount[RADEON_MAX_CRTCS]; + bool installed; + spinlock_t lock; + bool sw_int[RADEON_NUM_RINGS]; + int sw_refcount[RADEON_NUM_RINGS]; + bool crtc_vblank_int[RADEON_MAX_CRTCS]; + bool pflip[RADEON_MAX_CRTCS]; + int pflip_refcount[RADEON_MAX_CRTCS]; + wait_queue_head_t vblank_queue; + bool hpd[RADEON_MAX_HPD_PINS]; + bool gui_idle; + bool gui_idle_acked; + wait_queue_head_t idle_queue; + bool afmt[RADEON_MAX_AFMT_BLOCKS]; + union radeon_irq_stat_regs stat_regs; }; int radeon_irq_kms_init(struct radeon_device *rdev); @@ -638,6 +637,11 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring); void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring); void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); +void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block); +void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block); +void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask); +void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask); +int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev); /* * CP & rings. 
@@ -1062,7 +1066,6 @@ struct radeon_pm { int active_crtc_count; int req_vblank; bool vblank_sync; - bool gui_idle; fixed20_12 max_bandwidth; fixed20_12 igp_sideport_mclk; fixed20_12 igp_system_mclk; diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 5df58d1aba06..c5eb7a1461cf 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -32,6 +32,8 @@ #include "radeon.h" #include "atom.h" +#define RADEON_WAIT_IDLE_TIMEOUT 200 + irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -62,8 +64,10 @@ static void radeon_hotplug_work_func(struct work_struct *work) void radeon_driver_irq_preinstall_kms(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; + unsigned long irqflags; unsigned i; + spin_lock_irqsave(&rdev->irq.lock, irqflags); /* Disable *all* interrupts */ for (i = 0; i < RADEON_NUM_RINGS; i++) rdev->irq.sw_int[i] = false; @@ -76,6 +80,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) rdev->irq.afmt[i] = false; } radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); /* Clear bits */ radeon_irq_process(rdev); } @@ -83,23 +88,28 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) int radeon_driver_irq_postinstall_kms(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; + unsigned long irqflags; unsigned i; dev->max_vblank_count = 0x001fffff; + spin_lock_irqsave(&rdev->irq.lock, irqflags); for (i = 0; i < RADEON_NUM_RINGS; i++) rdev->irq.sw_int[i] = true; radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); return 0; } void radeon_driver_irq_uninstall_kms(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; + unsigned long irqflags; unsigned i; if (rdev == NULL) { return; } + spin_lock_irqsave(&rdev->irq.lock, irqflags); /* Disable *all* interrupts */ for (i = 0; i < RADEON_NUM_RINGS; i++) rdev->irq.sw_int[i] = false; @@ -112,6 +122,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) rdev->irq.afmt[i] = false; } radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } static bool radeon_msi_ok(struct radeon_device *rdev) @@ -168,15 +179,12 @@ static bool radeon_msi_ok(struct radeon_device *rdev) int radeon_irq_kms_init(struct radeon_device *rdev) { - int i; int r = 0; INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); - spin_lock_init(&rdev->irq.sw_lock); - for (i = 0; i < rdev->num_crtc; i++) - spin_lock_init(&rdev->irq.pflip_lock[i]); + spin_lock_init(&rdev->irq.lock); r = drm_vblank_init(rdev->ddev, rdev->num_crtc); if (r) { return r; @@ -217,25 +225,25 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring) { unsigned long irqflags; - spin_lock_irqsave(&rdev->irq.sw_lock, irqflags); + spin_lock_irqsave(&rdev->irq.lock, irqflags); if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) { rdev->irq.sw_int[ring] = true; radeon_irq_set(rdev); } - spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring) { unsigned long irqflags; - spin_lock_irqsave(&rdev->irq.sw_lock, irqflags); + spin_lock_irqsave(&rdev->irq.lock, irqflags); BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0); if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) { 
rdev->irq.sw_int[ring] = false; radeon_irq_set(rdev); } - spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) @@ -245,12 +253,12 @@ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) if (crtc < 0 || crtc >= rdev->num_crtc) return; - spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags); + spin_lock_irqsave(&rdev->irq.lock, irqflags); if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) { rdev->irq.pflip[crtc] = true; radeon_irq_set(rdev); } - spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) @@ -260,12 +268,76 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) if (crtc < 0 || crtc >= rdev->num_crtc) return; - spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags); + spin_lock_irqsave(&rdev->irq.lock, irqflags); BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0); if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) { rdev->irq.pflip[crtc] = false; radeon_irq_set(rdev); } - spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } +void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block) +{ + unsigned long irqflags; + + spin_lock_irqsave(&rdev->irq.lock, irqflags); + rdev->irq.afmt[block] = true; + radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); + +} + +void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block) +{ + unsigned long irqflags; + + spin_lock_irqsave(&rdev->irq.lock, irqflags); + rdev->irq.afmt[block] = false; + radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); +} + +void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask) +{ + unsigned long irqflags; + int i; + + spin_lock_irqsave(&rdev->irq.lock, irqflags); + for (i = 0; i < RADEON_MAX_HPD_PINS; ++i) + rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i)); + radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); +} + +void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask) +{ + unsigned long irqflags; + int i; + + spin_lock_irqsave(&rdev->irq.lock, irqflags); + for (i = 0; i < RADEON_MAX_HPD_PINS; ++i) + rdev->irq.hpd[i] &= !(hpd_mask & (1 << i)); + radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); +} + +int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev) +{ + unsigned long irqflags; + int r; + + spin_lock_irqsave(&rdev->irq.lock, irqflags); + rdev->irq.gui_idle = true; + radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); + + r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev), + msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT)); + + spin_lock_irqsave(&rdev->irq.lock, irqflags); + rdev->irq.gui_idle = false; + radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); + return r; +} diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 5c58d7d90cb2..18b81d63ee7b 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -382,29 +382,35 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) { struct radeon_device *rdev = dev->dev_private; 
+ unsigned long irqflags; + int r; if (crtc < 0 || crtc >= rdev->num_crtc) { DRM_ERROR("Invalid crtc %d\n", crtc); return -EINVAL; } + spin_lock_irqsave(&rdev->irq.lock, irqflags); rdev->irq.crtc_vblank_int[crtc] = true; - - return radeon_irq_set(rdev); + r = radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); + return r; } void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) { struct radeon_device *rdev = dev->dev_private; + unsigned long irqflags; if (crtc < 0 || crtc >= rdev->num_crtc) { DRM_ERROR("Invalid crtc %d\n", crtc); return; } + spin_lock_irqsave(&rdev->irq.lock, irqflags); rdev->irq.crtc_vblank_int[crtc] = false; - radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 9dc0b54fe3e3..7ae606600107 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -34,7 +34,6 @@ #define RADEON_IDLE_LOOP_MS 100 #define RADEON_RECLOCK_DELAY_MS 200 #define RADEON_WAIT_VBLANK_TIMEOUT 200 -#define RADEON_WAIT_IDLE_TIMEOUT 200 static const char *radeon_pm_state_type_name[5] = { "Default", @@ -257,15 +256,8 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) /* gui idle int has issues on older chips it seems */ if (rdev->family >= CHIP_R600) { if (rdev->irq.installed) { - /* wait for GPU idle */ - rdev->pm.gui_idle = false; - rdev->irq.gui_idle = true; - radeon_irq_set(rdev); - wait_event_interruptible_timeout( - rdev->irq.idle_queue, rdev->pm.gui_idle, - msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT)); - rdev->irq.gui_idle = false; - radeon_irq_set(rdev); + /* wait for GPU to become idle */ + radeon_irq_kms_wait_gui_idle(rdev); } } else { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index e95c5e61d4e2..f9aee25023cc 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -294,6 +294,7 @@ void rs600_hpd_init(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; struct drm_connector *connector; + unsigned enable = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -301,26 +302,25 @@ void rs600_hpd_init(struct radeon_device *rdev) case RADEON_HPD_1: WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, S_007D00_DC_HOT_PLUG_DETECT1_EN(1)); - rdev->irq.hpd[0] = true; break; case RADEON_HPD_2: WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, S_007D10_DC_HOT_PLUG_DETECT2_EN(1)); - rdev->irq.hpd[1] = true; break; default: break; } + enable |= 1 << radeon_connector->hpd.hpd; radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); } - if (rdev->irq.installed) - rs600_irq_set(rdev); + radeon_irq_kms_enable_hpd(rdev, enable); } void rs600_hpd_fini(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; struct drm_connector *connector; + unsigned disable = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -328,17 +328,17 @@ void rs600_hpd_fini(struct radeon_device *rdev) case RADEON_HPD_1: WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, S_007D00_DC_HOT_PLUG_DETECT1_EN(0)); - rdev->irq.hpd[0] = false; break; case RADEON_HPD_2: WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, S_007D10_DC_HOT_PLUG_DETECT2_EN(0)); - 
rdev->irq.hpd[1] = false; break; default: break; } + disable |= 1 << radeon_connector->hpd.hpd; } + radeon_irq_kms_disable_hpd(rdev, disable); } int rs600_asic_reset(struct radeon_device *rdev) @@ -686,7 +686,6 @@ int rs600_irq_process(struct radeon_device *rdev) /* GUI idle */ if (G_000040_GUI_IDLE(status)) { rdev->irq.gui_idle_acked = true; - rdev->pm.gui_idle = true; wake_up(&rdev->irq.idle_queue); } /* Vertical blank interrupts */ diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ecef972050d1..a65045bccaf6 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3617,7 +3617,6 @@ restart_ih: break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: GUI idle\n"); - rdev->pm.gui_idle = true; wake_up(&rdev->irq.idle_queue); break; default: -- cgit v1.2.3 From 736fc37fd7c7634e939e9ec0c67765941913bb82 Mon Sep 17 00:00:00 2001 From: Christian Koenig Date: Thu, 17 May 2012 19:52:00 +0200 Subject: drm/radeon: replace pflip and sw_int counters with atomics So we can skip the locking. Also renames sw_int to ring_int, cause that better matches its purpose. Signed-off-by: Christian Koenig --- drivers/gpu/drm/radeon/evergreen.c | 32 +++++++++--------- drivers/gpu/drm/radeon/r100.c | 10 +++--- drivers/gpu/drm/radeon/r600.c | 10 +++--- drivers/gpu/drm/radeon/radeon.h | 6 ++-- drivers/gpu/drm/radeon/radeon_irq_kms.c | 59 ++++++++++++++++----------------- drivers/gpu/drm/radeon/rs600.c | 10 +++--- drivers/gpu/drm/radeon/si.c | 30 ++++++++--------- 7 files changed, 76 insertions(+), 81 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index bdc1f30d7474..f716e081c813 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2340,20 +2340,20 @@ int evergreen_irq_set(struct radeon_device *rdev) if (rdev->family >= CHIP_CAYMAN) { /* enable CP interrupts on all rings */ - if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) { + if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { DRM_DEBUG("evergreen_irq_set: sw int gfx\n"); cp_int_cntl |= TIME_STAMP_INT_ENABLE; } - if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) { + if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) { DRM_DEBUG("evergreen_irq_set: sw int cp1\n"); cp_int_cntl1 |= TIME_STAMP_INT_ENABLE; } - if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) { + if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) { DRM_DEBUG("evergreen_irq_set: sw int cp2\n"); cp_int_cntl2 |= TIME_STAMP_INT_ENABLE; } } else { - if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) { + if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { DRM_DEBUG("evergreen_irq_set: sw int gfx\n"); cp_int_cntl |= RB_INT_ENABLE; cp_int_cntl |= TIME_STAMP_INT_ENABLE; @@ -2361,32 +2361,32 @@ int evergreen_irq_set(struct radeon_device *rdev) } if (rdev->irq.crtc_vblank_int[0] || - rdev->irq.pflip[0]) { + atomic_read(&rdev->irq.pflip[0])) { DRM_DEBUG("evergreen_irq_set: vblank 0\n"); crtc1 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[1] || - rdev->irq.pflip[1]) { + atomic_read(&rdev->irq.pflip[1])) { DRM_DEBUG("evergreen_irq_set: vblank 1\n"); crtc2 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[2] || - rdev->irq.pflip[2]) { + atomic_read(&rdev->irq.pflip[2])) { DRM_DEBUG("evergreen_irq_set: vblank 2\n"); crtc3 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[3] || - rdev->irq.pflip[3]) { + atomic_read(&rdev->irq.pflip[3])) { DRM_DEBUG("evergreen_irq_set: vblank 3\n"); crtc4 |= VBLANK_INT_MASK; } if 
(rdev->irq.crtc_vblank_int[4] || - rdev->irq.pflip[4]) { + atomic_read(&rdev->irq.pflip[4])) { DRM_DEBUG("evergreen_irq_set: vblank 4\n"); crtc5 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[5] || - rdev->irq.pflip[5]) { + atomic_read(&rdev->irq.pflip[5])) { DRM_DEBUG("evergreen_irq_set: vblank 5\n"); crtc6 |= VBLANK_INT_MASK; } @@ -2706,7 +2706,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[0]) + if (atomic_read(&rdev->irq.pflip[0])) radeon_crtc_handle_flip(rdev, 0); rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); @@ -2732,7 +2732,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[1]) + if (atomic_read(&rdev->irq.pflip[1])) radeon_crtc_handle_flip(rdev, 1); rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); @@ -2758,7 +2758,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[2]) + if (atomic_read(&rdev->irq.pflip[2])) radeon_crtc_handle_flip(rdev, 2); rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; DRM_DEBUG("IH: D3 vblank\n"); @@ -2784,7 +2784,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[3]) + if (atomic_read(&rdev->irq.pflip[3])) radeon_crtc_handle_flip(rdev, 3); rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; DRM_DEBUG("IH: D4 vblank\n"); @@ -2810,7 +2810,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[4]) + if (atomic_read(&rdev->irq.pflip[4])) radeon_crtc_handle_flip(rdev, 4); rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; DRM_DEBUG("IH: D5 vblank\n"); @@ -2836,7 +2836,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[5]) + if (atomic_read(&rdev->irq.pflip[5])) radeon_crtc_handle_flip(rdev, 5); rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; DRM_DEBUG("IH: D6 vblank\n"); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e8fe4ae3bc23..35825bf1e790 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -689,18 +689,18 @@ int r100_irq_set(struct radeon_device *rdev) WREG32(R_000040_GEN_INT_CNTL, 0); return -EINVAL; } - if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) { + if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { tmp |= RADEON_SW_INT_ENABLE; } if (rdev->irq.gui_idle) { tmp |= RADEON_GUI_IDLE_MASK; } if (rdev->irq.crtc_vblank_int[0] || - rdev->irq.pflip[0]) { + atomic_read(&rdev->irq.pflip[0])) { tmp |= RADEON_CRTC_VBLANK_MASK; } if (rdev->irq.crtc_vblank_int[1] || - rdev->irq.pflip[1]) { + atomic_read(&rdev->irq.pflip[1])) { tmp |= RADEON_CRTC2_VBLANK_MASK; } if (rdev->irq.hpd[0]) { @@ -775,7 +775,7 @@ int r100_irq_process(struct radeon_device *rdev) rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[0]) + if (atomic_read(&rdev->irq.pflip[0])) radeon_crtc_handle_flip(rdev, 0); } if (status & RADEON_CRTC2_VBLANK_STAT) { @@ -784,7 +784,7 @@ int r100_irq_process(struct radeon_device *rdev) rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[1]) + if (atomic_read(&rdev->irq.pflip[1])) radeon_crtc_handle_flip(rdev, 1); } if (status & RADEON_FP_DETECT_STAT) { diff --git a/drivers/gpu/drm/radeon/r600.c 
b/drivers/gpu/drm/radeon/r600.c index 9cd77286542c..43d0c41922a5 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3025,18 +3025,18 @@ int r600_irq_set(struct radeon_device *rdev) hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; } - if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) { + if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { DRM_DEBUG("r600_irq_set: sw int\n"); cp_int_cntl |= RB_INT_ENABLE; cp_int_cntl |= TIME_STAMP_INT_ENABLE; } if (rdev->irq.crtc_vblank_int[0] || - rdev->irq.pflip[0]) { + atomic_read(&rdev->irq.pflip[0])) { DRM_DEBUG("r600_irq_set: vblank 0\n"); mode_int |= D1MODE_VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[1] || - rdev->irq.pflip[1]) { + atomic_read(&rdev->irq.pflip[1])) { DRM_DEBUG("r600_irq_set: vblank 1\n"); mode_int |= D2MODE_VBLANK_INT_MASK; } @@ -3334,7 +3334,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[0]) + if (atomic_read(&rdev->irq.pflip[0])) radeon_crtc_handle_flip(rdev, 0); rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); @@ -3360,7 +3360,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[1]) + if (atomic_read(&rdev->irq.pflip[1])) radeon_crtc_handle_flip(rdev, 1); rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 353a1830514a..126bac5079fd 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -617,11 +617,9 @@ union radeon_irq_stat_regs { struct radeon_irq { bool installed; spinlock_t lock; - bool sw_int[RADEON_NUM_RINGS]; - int sw_refcount[RADEON_NUM_RINGS]; + atomic_t ring_int[RADEON_NUM_RINGS]; bool crtc_vblank_int[RADEON_MAX_CRTCS]; - bool pflip[RADEON_MAX_CRTCS]; - int pflip_refcount[RADEON_MAX_CRTCS]; + atomic_t pflip[RADEON_MAX_CRTCS]; wait_queue_head_t vblank_queue; bool hpd[RADEON_MAX_HPD_PINS]; bool gui_idle; diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index c5eb7a1461cf..6664514bbdca 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -70,13 +70,13 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) spin_lock_irqsave(&rdev->irq.lock, irqflags); /* Disable *all* interrupts */ for (i = 0; i < RADEON_NUM_RINGS; i++) - rdev->irq.sw_int[i] = false; + atomic_set(&rdev->irq.ring_int[i], 0); rdev->irq.gui_idle = false; for (i = 0; i < RADEON_MAX_HPD_PINS; i++) rdev->irq.hpd[i] = false; for (i = 0; i < RADEON_MAX_CRTCS; i++) { rdev->irq.crtc_vblank_int[i] = false; - rdev->irq.pflip[i] = false; + atomic_set(&rdev->irq.pflip[i], 0); rdev->irq.afmt[i] = false; } radeon_irq_set(rdev); @@ -87,16 +87,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) int radeon_driver_irq_postinstall_kms(struct drm_device *dev) { - struct radeon_device *rdev = dev->dev_private; - unsigned long irqflags; - unsigned i; - dev->max_vblank_count = 0x001fffff; - spin_lock_irqsave(&rdev->irq.lock, irqflags); - for (i = 0; i < RADEON_NUM_RINGS; i++) - rdev->irq.sw_int[i] = true; - radeon_irq_set(rdev); - spin_unlock_irqrestore(&rdev->irq.lock, irqflags); return 0; } @@ -112,13 +103,13 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) spin_lock_irqsave(&rdev->irq.lock, irqflags); /* Disable *all* interrupts */ for (i = 0; i < RADEON_NUM_RINGS; i++) 
- rdev->irq.sw_int[i] = false; + atomic_set(&rdev->irq.ring_int[i], 0); rdev->irq.gui_idle = false; for (i = 0; i < RADEON_MAX_HPD_PINS; i++) rdev->irq.hpd[i] = false; for (i = 0; i < RADEON_MAX_CRTCS; i++) { rdev->irq.crtc_vblank_int[i] = false; - rdev->irq.pflip[i] = false; + atomic_set(&rdev->irq.pflip[i], 0); rdev->irq.afmt[i] = false; } radeon_irq_set(rdev); @@ -225,25 +216,28 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring) { unsigned long irqflags; - spin_lock_irqsave(&rdev->irq.lock, irqflags); - if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) { - rdev->irq.sw_int[ring] = true; + if (!rdev->ddev->irq_enabled) + return; + + if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) { + spin_lock_irqsave(&rdev->irq.lock, irqflags); radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } - spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring) { unsigned long irqflags; - spin_lock_irqsave(&rdev->irq.lock, irqflags); - BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0); - if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) { - rdev->irq.sw_int[ring] = false; + if (!rdev->ddev->irq_enabled) + return; + + if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) { + spin_lock_irqsave(&rdev->irq.lock, irqflags); radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } - spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) @@ -253,12 +247,14 @@ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) if (crtc < 0 || crtc >= rdev->num_crtc) return; - spin_lock_irqsave(&rdev->irq.lock, irqflags); - if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) { - rdev->irq.pflip[crtc] = true; + if (!rdev->ddev->irq_enabled) + return; + + if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) { + spin_lock_irqsave(&rdev->irq.lock, irqflags); radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } - spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) @@ -268,13 +264,14 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) if (crtc < 0 || crtc >= rdev->num_crtc) return; - spin_lock_irqsave(&rdev->irq.lock, irqflags); - BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0); - if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) { - rdev->irq.pflip[crtc] = false; + if (!rdev->ddev->irq_enabled) + return; + + if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) { + spin_lock_irqsave(&rdev->irq.lock, irqflags); radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } - spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block) diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index f9aee25023cc..e11bc4651784 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -564,18 +564,18 @@ int rs600_irq_set(struct radeon_device *rdev) WREG32(R_000040_GEN_INT_CNTL, 0); return -EINVAL; } - if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) { + if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { tmp |= S_000040_SW_INT_EN(1); } if (rdev->irq.gui_idle) { tmp |= S_000040_GUI_IDLE(1); } if (rdev->irq.crtc_vblank_int[0] || - rdev->irq.pflip[0]) { + 
atomic_read(&rdev->irq.pflip[0])) { mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); } if (rdev->irq.crtc_vblank_int[1] || - rdev->irq.pflip[1]) { + atomic_read(&rdev->irq.pflip[1])) { mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); } if (rdev->irq.hpd[0]) { @@ -695,7 +695,7 @@ int rs600_irq_process(struct radeon_device *rdev) rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[0]) + if (atomic_read(&rdev->irq.pflip[0])) radeon_crtc_handle_flip(rdev, 0); } if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { @@ -704,7 +704,7 @@ int rs600_irq_process(struct radeon_device *rdev) rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[1]) + if (atomic_read(&rdev->irq.pflip[1])) radeon_crtc_handle_flip(rdev, 1); } if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index a65045bccaf6..34603b3c80ab 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3092,45 +3092,45 @@ int si_irq_set(struct radeon_device *rdev) hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; /* enable CP interrupts on all rings */ - if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) { + if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { DRM_DEBUG("si_irq_set: sw int gfx\n"); cp_int_cntl |= TIME_STAMP_INT_ENABLE; } - if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) { + if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) { DRM_DEBUG("si_irq_set: sw int cp1\n"); cp_int_cntl1 |= TIME_STAMP_INT_ENABLE; } - if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) { + if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) { DRM_DEBUG("si_irq_set: sw int cp2\n"); cp_int_cntl2 |= TIME_STAMP_INT_ENABLE; } if (rdev->irq.crtc_vblank_int[0] || - rdev->irq.pflip[0]) { + atomic_read(&rdev->irq.pflip[0])) { DRM_DEBUG("si_irq_set: vblank 0\n"); crtc1 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[1] || - rdev->irq.pflip[1]) { + atomic_read(&rdev->irq.pflip[1])) { DRM_DEBUG("si_irq_set: vblank 1\n"); crtc2 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[2] || - rdev->irq.pflip[2]) { + atomic_read(&rdev->irq.pflip[2])) { DRM_DEBUG("si_irq_set: vblank 2\n"); crtc3 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[3] || - rdev->irq.pflip[3]) { + atomic_read(&rdev->irq.pflip[3])) { DRM_DEBUG("si_irq_set: vblank 3\n"); crtc4 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[4] || - rdev->irq.pflip[4]) { + atomic_read(&rdev->irq.pflip[4])) { DRM_DEBUG("si_irq_set: vblank 4\n"); crtc5 |= VBLANK_INT_MASK; } if (rdev->irq.crtc_vblank_int[5] || - rdev->irq.pflip[5]) { + atomic_read(&rdev->irq.pflip[5])) { DRM_DEBUG("si_irq_set: vblank 5\n"); crtc6 |= VBLANK_INT_MASK; } @@ -3396,7 +3396,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[0]) + if (atomic_read(&rdev->irq.pflip[0])) radeon_crtc_handle_flip(rdev, 0); rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); @@ -3422,7 +3422,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[1]) + if (atomic_read(&rdev->irq.pflip[1])) radeon_crtc_handle_flip(rdev, 1); rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); @@ -3448,7 +3448,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[2]) + if 
(atomic_read(&rdev->irq.pflip[2])) radeon_crtc_handle_flip(rdev, 2); rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; DRM_DEBUG("IH: D3 vblank\n"); @@ -3474,7 +3474,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[3]) + if (atomic_read(&rdev->irq.pflip[3])) radeon_crtc_handle_flip(rdev, 3); rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; DRM_DEBUG("IH: D4 vblank\n"); @@ -3500,7 +3500,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[4]) + if (atomic_read(&rdev->irq.pflip[4])) radeon_crtc_handle_flip(rdev, 4); rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; DRM_DEBUG("IH: D5 vblank\n"); @@ -3526,7 +3526,7 @@ restart_ih: rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } - if (rdev->irq.pflip[5]) + if (atomic_read(&rdev->irq.pflip[5])) radeon_crtc_handle_flip(rdev, 5); rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; DRM_DEBUG("IH: D6 vblank\n"); -- cgit v1.2.3 From 36ff39c4045ee71cd306f8af5f8c2a1c6e998eba Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 9 May 2012 10:07:08 +0200 Subject: drm/radeon: replace cs_mutex with vm_mutex v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Try to remove or replace the cs_mutex with a vm_mutex where it is still needed. v2: fix locking order v3: rebased on drm-next Signed-off-by: Christian König --- drivers/gpu/drm/radeon/radeon.h | 44 +--------------------------------- drivers/gpu/drm/radeon/radeon_cs.c | 9 +++---- drivers/gpu/drm/radeon/radeon_device.c | 2 +- drivers/gpu/drm/radeon/radeon_gart.c | 24 +++++++++---------- drivers/gpu/drm/radeon/radeon_gem.c | 2 -- 5 files changed, 17 insertions(+), 64 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 126bac5079fd..77b4519b19b8 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -159,48 +159,6 @@ static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len) #endif bool radeon_get_bios(struct radeon_device *rdev); - -/* - * Mutex which allows recursive locking from the same process. 
- */ -struct radeon_mutex { - struct mutex mutex; - struct task_struct *owner; - int level; -}; - -static inline void radeon_mutex_init(struct radeon_mutex *mutex) -{ - mutex_init(&mutex->mutex); - mutex->owner = NULL; - mutex->level = 0; -} - -static inline void radeon_mutex_lock(struct radeon_mutex *mutex) -{ - if (mutex_trylock(&mutex->mutex)) { - /* The mutex was unlocked before, so it's ours now */ - mutex->owner = current; - } else if (mutex->owner != current) { - /* Another process locked the mutex, take it */ - mutex_lock(&mutex->mutex); - mutex->owner = current; - } - /* Otherwise the mutex was already locked by this process */ - - mutex->level++; -} - -static inline void radeon_mutex_unlock(struct radeon_mutex *mutex) -{ - if (--mutex->level > 0) - return; - - mutex->owner = NULL; - mutex_unlock(&mutex->mutex); -} - - /* * Dummy page */ @@ -712,6 +670,7 @@ struct radeon_vm_funcs { }; struct radeon_vm_manager { + struct mutex lock; struct list_head lru_vm; uint32_t use_bitmap; struct radeon_sa_manager sa_manager; @@ -1532,7 +1491,6 @@ struct radeon_device { struct radeon_gem gem; struct radeon_pm pm; uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; - struct radeon_mutex cs_mutex; struct radeon_wb wb; struct radeon_dummy_page dummy_page; bool shutdown; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index dd3e234294e4..f1b75275f594 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -440,6 +440,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, return r; } + mutex_lock(&rdev->vm_manager.lock); mutex_lock(&vm->mutex); r = radeon_vm_bind(rdev, vm); if (r) { @@ -477,7 +478,8 @@ out: } vm->fence = radeon_fence_ref(parser->ib.fence); } - mutex_unlock(&fpriv->vm.mutex); + mutex_unlock(&vm->mutex); + mutex_unlock(&rdev->vm_manager.lock); return r; } @@ -497,9 +499,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct radeon_cs_parser parser; int r; - radeon_mutex_lock(&rdev->cs_mutex); if (!rdev->accel_working) { - radeon_mutex_unlock(&rdev->cs_mutex); return -EBUSY; } /* initialize parser */ @@ -513,7 +513,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) DRM_ERROR("Failed to initialize parser !\n"); radeon_cs_parser_fini(&parser, r); r = radeon_cs_handle_lockup(rdev, r); - radeon_mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_cs_parser_relocs(&parser); @@ -522,7 +521,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) DRM_ERROR("Failed to parse relocation %d!\n", r); radeon_cs_parser_fini(&parser, r); r = radeon_cs_handle_lockup(rdev, r); - radeon_mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_cs_ib_chunk(rdev, &parser); @@ -536,7 +534,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) out: radeon_cs_parser_fini(&parser, r); r = radeon_cs_handle_lockup(rdev, r); - radeon_mutex_unlock(&rdev->cs_mutex); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 3c563d1f9671..f654ba80e96e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -728,7 +728,6 @@ int radeon_device_init(struct radeon_device *rdev, /* mutex initialization are all done here so we * can recall function without having locking issues */ - radeon_mutex_init(&rdev->cs_mutex); mutex_init(&rdev->ring_lock); mutex_init(&rdev->dc_hw_i2c_mutex); atomic_set(&rdev->ih.lock, 0); @@ -741,6 
+740,7 @@ int radeon_device_init(struct radeon_device *rdev, if (r) return r; /* initialize vm here */ + mutex_init(&rdev->vm_manager.lock); rdev->vm_manager.use_bitmap = 1; rdev->vm_manager.max_pfn = 1 << 20; INIT_LIST_HEAD(&rdev->vm_manager.lru_vm); diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 59d44937dd9f..2b34c1a91421 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -305,7 +305,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev) return r; } -/* cs mutex must be lock */ +/* global mutex must be lock */ static void radeon_vm_unbind_locked(struct radeon_device *rdev, struct radeon_vm *vm) { @@ -356,17 +356,17 @@ int radeon_vm_manager_suspend(struct radeon_device *rdev) { struct radeon_vm *vm, *tmp; - radeon_mutex_lock(&rdev->cs_mutex); + mutex_lock(&rdev->vm_manager.lock); /* unbind all active vm */ list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) { radeon_vm_unbind_locked(rdev, vm); } rdev->vm_manager.funcs->fini(rdev); - radeon_mutex_unlock(&rdev->cs_mutex); + mutex_unlock(&rdev->vm_manager.lock); return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager); } -/* cs mutex must be lock */ +/* global mutex must be locked */ void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm) { mutex_lock(&vm->mutex); @@ -374,7 +374,7 @@ void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm) mutex_unlock(&vm->mutex); } -/* cs mutex must be lock & vm mutex must be lock */ +/* global and local mutex must be locked */ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm) { struct radeon_vm *vm_evict; @@ -478,7 +478,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev, if (last_pfn > vm->last_pfn) { /* release mutex and lock in right order */ mutex_unlock(&vm->mutex); - radeon_mutex_lock(&rdev->cs_mutex); + mutex_lock(&rdev->vm_manager.lock); mutex_lock(&vm->mutex); /* and check again */ if (last_pfn > vm->last_pfn) { @@ -487,7 +487,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev, radeon_vm_unbind_locked(rdev, vm); vm->last_pfn = (last_pfn + align) & ~align; } - radeon_mutex_unlock(&rdev->cs_mutex); + mutex_unlock(&rdev->vm_manager.lock); } head = &vm->va; last_offset = 0; @@ -542,7 +542,7 @@ static u64 radeon_vm_get_addr(struct radeon_device *rdev, return addr; } -/* object have to be reserved & cs mutex took & vm mutex took */ +/* object have to be reserved & global and local mutex must be locked */ int radeon_vm_bo_update_pte(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo, @@ -601,10 +601,10 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, if (bo_va == NULL) return 0; - radeon_mutex_lock(&rdev->cs_mutex); + mutex_lock(&rdev->vm_manager.lock); mutex_lock(&vm->mutex); radeon_vm_bo_update_pte(rdev, vm, bo, NULL); - radeon_mutex_unlock(&rdev->cs_mutex); + mutex_unlock(&rdev->vm_manager.lock); list_del(&bo_va->vm_list); mutex_unlock(&vm->mutex); list_del(&bo_va->bo_list); @@ -647,10 +647,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) struct radeon_bo_va *bo_va, *tmp; int r; - radeon_mutex_lock(&rdev->cs_mutex); + mutex_lock(&rdev->vm_manager.lock); mutex_lock(&vm->mutex); radeon_vm_unbind_locked(rdev, vm); - radeon_mutex_unlock(&rdev->cs_mutex); + mutex_unlock(&rdev->vm_manager.lock); /* remove all bo */ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 
f28bd4b7ef98..d9b0809be4e4 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -159,11 +159,9 @@ void radeon_gem_object_close(struct drm_gem_object *obj, static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r) { if (r == -EDEADLK) { - radeon_mutex_lock(&rdev->cs_mutex); r = radeon_gpu_reset(rdev); if (!r) r = -EAGAIN; - radeon_mutex_unlock(&rdev->cs_mutex); } return r; } -- cgit v1.2.3 From b375de0b09ef1659c28f365a9a8e947c311f77e5 Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Mon, 18 Jun 2012 11:09:56 +0530 Subject: drm: Add missing static storage class specifier Fixes the following sparse warning: drivers/gpu/drm/drm_info.c:238:5: warning: symbol 'drm_gem_one_name_info' was not declared. Should it be static? Signed-off-by: Sachin Kamat Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_info.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index ab1162da70f8..4076a33e5cad 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -235,7 +235,7 @@ int drm_clients_info(struct seq_file *m, void *data) } -int drm_gem_one_name_info(int id, void *ptr, void *data) +static int drm_gem_one_name_info(int id, void *ptr, void *data) { struct drm_gem_object *obj = ptr; struct seq_file *m = data; -- cgit v1.2.3 From 4ef7fe7c66d4653930f67566a1df1c8f2d6daa6b Mon Sep 17 00:00:00 2001 From: Yuanhan Liu Date: Sat, 16 Jun 2012 22:25:24 +0800 Subject: drm: use format %d to print error code It is more readable by printing "ret = -1" than "ret = 0xffffffff" Signed-off-by: Yuanhan Liu Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 8a9d0792e4ec..946bd91c57ec 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -486,7 +486,7 @@ long drm_ioctl(struct file *filp, kfree(kdata); atomic_dec(&dev->ioctl_count); if (retcode) - DRM_DEBUG("ret = %x\n", retcode); + DRM_DEBUG("ret = %d\n", retcode); return retcode; } -- cgit v1.2.3 From 9756fe38d10b2bf90c81dc4d2f17d5632e135364 Mon Sep 17 00:00:00 2001 From: Sjoerd Simons Date: Fri, 22 Jun 2012 09:43:07 +0200 Subject: drm/i915: no lvds quirk for Zotac ZDBOX SD ID12/ID13 This box claims to have an LVDS interface but doesn't actually have one. Signed-off-by: Sjoerd Simons Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_lvds.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index ab4d64792616..05fcadbeac66 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -778,6 +778,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_BOARD_NAME, "MS-7469"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "ZOTAC ZBOXSD-ID12/ID13", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"), + DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), + }, + }, { } /* terminating entry */ }; -- cgit v1.2.3 From 8090c6b9daa04dda649ac0a2209601042abfb0a4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 24 Jun 2012 16:42:32 +0200 Subject: drm/i915: wrap up gt powersave enabling functions ... instead of calling each one for each generation indiviudally. Notice that we've already managed to be inconsistent, the resume path is missing an IS_VLV check. 
As a nice benefit we can mark all the platform specific enable/disable functions as static and hide them in intel_pm.c Reviewed-by: Eugeni Dodonov Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_suspend.c | 5 +---- drivers/gpu/drm/i915/intel_display.c | 21 ++------------------ drivers/gpu/drm/i915/intel_drv.h | 9 ++------- drivers/gpu/drm/i915/intel_pm.c | 37 +++++++++++++++++++++++++++++------- 4 files changed, 35 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 0ede02a99d91..740c076ea330 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -825,10 +825,7 @@ int i915_save_state(struct drm_device *dev) dev_priv->saveIMR = I915_READ(IMR); } - if (IS_IRONLAKE_M(dev)) - ironlake_disable_drps(dev); - if (INTEL_INFO(dev)->gen >= 6) - gen6_disable_rps(dev); + intel_disable_gt_powersave(dev); /* Cache mode state */ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b3052ef70d16..fdca5b925c6c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7172,20 +7172,9 @@ static void ivb_pch_pwm_override(struct drm_device *dev) void intel_modeset_init_hw(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - intel_init_clock_gating(dev); - if (IS_IRONLAKE_M(dev)) { - ironlake_enable_drps(dev); - ironlake_enable_rc6(dev); - intel_init_emon(dev); - } - - if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { - gen6_enable_rps(dev_priv); - gen6_update_ring_freq(dev_priv); - } + intel_enable_gt_powersave(dev); if (IS_IVYBRIDGE(dev)) ivb_pch_pwm_override(dev); @@ -7277,13 +7266,7 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_disable_fbc(dev); - if (IS_IRONLAKE_M(dev)) - ironlake_disable_drps(dev); - if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) - gen6_disable_rps(dev); - - if (IS_IRONLAKE_M(dev)) - ironlake_disable_rc6(dev); + intel_disable_gt_powersave(dev); if (IS_VALLEYVIEW(dev)) vlv_init_dpio(dev); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 5290e9df327b..cc1573b2b60b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -425,9 +425,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno); extern void intel_enable_clock_gating(struct drm_device *dev); -extern void ironlake_disable_rc6(struct drm_device *dev); -extern void ironlake_enable_drps(struct drm_device *dev); -extern void ironlake_disable_drps(struct drm_device *dev); extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, @@ -494,10 +491,8 @@ extern void intel_update_fbc(struct drm_device *dev); extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); extern void intel_gpu_ips_teardown(void); -extern void gen6_enable_rps(struct drm_i915_private *dev_priv); -extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv); -extern void gen6_disable_rps(struct drm_device *dev); -extern void intel_init_emon(struct drm_device *dev); +extern void intel_enable_gt_powersave(struct drm_device *dev); +extern void intel_disable_gt_powersave(struct drm_device *dev); extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode); extern void intel_ddi_mode_set(struct 
drm_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 7504fbce05cc..2baba10723f5 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2184,7 +2184,7 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val) return true; } -void ironlake_enable_drps(struct drm_device *dev) +static void ironlake_enable_drps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 rgvmodectl = I915_READ(MEMMODECTL); @@ -2248,7 +2248,7 @@ void ironlake_enable_drps(struct drm_device *dev) getrawmonotonic(&dev_priv->last_time2); } -void ironlake_disable_drps(struct drm_device *dev) +static void ironlake_disable_drps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u16 rgvswctl = I915_READ16(MEMSWCTL); @@ -2301,7 +2301,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val) dev_priv->cur_delay = val; } -void gen6_disable_rps(struct drm_device *dev) +static void gen6_disable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2349,7 +2349,7 @@ int intel_enable_rc6(const struct drm_device *dev) return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); } -void gen6_enable_rps(struct drm_i915_private *dev_priv) +static void gen6_enable_rps(struct drm_i915_private *dev_priv) { struct intel_ring_buffer *ring; u32 rp_state_cap; @@ -2494,7 +2494,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->dev->struct_mutex); } -void gen6_update_ring_freq(struct drm_i915_private *dev_priv) +static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) { int min_freq = 15; int gpu_freq, ia_freq, max_ia_freq; @@ -3156,8 +3156,7 @@ void intel_gpu_ips_teardown(void) i915_mch_dev = NULL; spin_unlock(&mchdev_lock); } - -void intel_init_emon(struct drm_device *dev) +static void intel_init_emon(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 lcfuse; @@ -3228,6 +3227,30 @@ void intel_init_emon(struct drm_device *dev) dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); } +void intel_disable_gt_powersave(struct drm_device *dev) +{ + if (IS_IRONLAKE_M(dev)) + ironlake_disable_drps(dev); + if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) + gen6_disable_rps(dev); +} + +void intel_enable_gt_powersave(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_IRONLAKE_M(dev)) { + ironlake_enable_drps(dev); + ironlake_enable_rc6(dev); + intel_init_emon(dev); + } + + if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { + gen6_enable_rps(dev_priv); + gen6_update_ring_freq(dev_priv); + } +} + static void ironlake_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; -- cgit v1.2.3 From 79f5b2c7599270aa3dcfffb445f8f520c05a7fc5 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 24 Jun 2012 16:42:33 +0200 Subject: drm/i915: make enable/disable_gt_powersave locking consistent The enable functions grabbed dev->struct_mutex themselves, whereas the disable functions expected dev->struct_mutex to be held by the caller. Move the locking out to the (currently only) callsite of intel_enable_gt_powersave to make this more consistent. Originally this was prep work for future patches, but I've chased down a totally wrong alley. Still, I think this is a sensible clarification. 
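In outline, the convention this settles on is: the caller takes dev->struct_mutex once around the whole enable sequence, and the per-generation helpers only assert it. A condensed sketch (bodies elided, names taken from the hunks below, nothing beyond what the diff itself does):

    /* caller side, intel_modeset_init_hw() */
    mutex_lock(&dev->struct_mutex);
    intel_enable_gt_powersave(dev);
    mutex_unlock(&dev->struct_mutex);

    /* callee side: no longer takes the lock itself, only documents the rule */
    static void gen6_enable_rps(struct drm_device *dev)
    {
            struct drm_i915_private *dev_priv = dev->dev_private;

            WARN_ON(!mutex_is_locked(&dev->struct_mutex));
            /* ... register writes that rely on the lock being held ... */
    }
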
Reviewed-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 2 ++ drivers/gpu/drm/i915/intel_pm.c | 32 +++++++++++++------------------- 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index fdca5b925c6c..3fbc802b4940 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7174,7 +7174,9 @@ void intel_modeset_init_hw(struct drm_device *dev) { intel_init_clock_gating(dev); + mutex_lock(&dev->struct_mutex); intel_enable_gt_powersave(dev); + mutex_unlock(&dev->struct_mutex); if (IS_IVYBRIDGE(dev)) ivb_pch_pwm_override(dev); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2baba10723f5..99bc1f33bfcb 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2349,8 +2349,9 @@ int intel_enable_rc6(const struct drm_device *dev) return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); } -static void gen6_enable_rps(struct drm_i915_private *dev_priv) +static void gen6_enable_rps(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; u32 rp_state_cap; u32 gt_perf_status; @@ -2359,6 +2360,8 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) int rc6_mode; int i; + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + /* Here begins a magic sequence of register writes to enable * auto-downclocking. * @@ -2366,7 +2369,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) * userspace... */ I915_WRITE(GEN6_RC_STATE, 0); - mutex_lock(&dev_priv->dev->struct_mutex); /* Clear the DBG now so we don't confuse earlier errors */ if ((gtfifodbg = I915_READ(GTFIFODBG))) { @@ -2491,15 +2493,17 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_PMINTRMSK, 0); gen6_gt_force_wake_put(dev_priv); - mutex_unlock(&dev_priv->dev->struct_mutex); } -static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) +static void gen6_update_ring_freq(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; int min_freq = 15; int gpu_freq, ia_freq, max_ia_freq; int scaling_factor = 180; + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + max_ia_freq = cpufreq_quick_get_max(0); /* * Default to measured freq if none found, PCU will ensure we don't go @@ -2511,8 +2515,6 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) /* Convert from kHz to MHz */ max_ia_freq /= 1000; - mutex_lock(&dev_priv->dev->struct_mutex); - /* * For each potential GPU frequency, load a ring frequency we'd like * to use for memory access. 
We do this by specifying the IA frequency @@ -2543,8 +2545,6 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) continue; } } - - mutex_unlock(&dev_priv->dev->struct_mutex); } static void ironlake_teardown_rc6(struct drm_device *dev) @@ -2615,12 +2615,11 @@ void ironlake_enable_rc6(struct drm_device *dev) if (!intel_enable_rc6(dev)) return; - mutex_lock(&dev->struct_mutex); + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + ret = ironlake_setup_rc6(dev); - if (ret) { - mutex_unlock(&dev->struct_mutex); + if (ret) return; - } /* * GPU can automatically power down the render unit if given a page @@ -2629,7 +2628,6 @@ void ironlake_enable_rc6(struct drm_device *dev) ret = intel_ring_begin(ring, 6); if (ret) { ironlake_teardown_rc6(dev); - mutex_unlock(&dev->struct_mutex); return; } @@ -2654,13 +2652,11 @@ void ironlake_enable_rc6(struct drm_device *dev) if (ret) { DRM_ERROR("failed to enable ironlake power power savings\n"); ironlake_teardown_rc6(dev); - mutex_unlock(&dev->struct_mutex); return; } I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); - mutex_unlock(&dev->struct_mutex); } static unsigned long intel_pxfreq(u32 vidfreq) @@ -3237,8 +3233,6 @@ void intel_disable_gt_powersave(struct drm_device *dev) void intel_enable_gt_powersave(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_IRONLAKE_M(dev)) { ironlake_enable_drps(dev); ironlake_enable_rc6(dev); @@ -3246,8 +3240,8 @@ void intel_enable_gt_powersave(struct drm_device *dev) } if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { - gen6_enable_rps(dev_priv); - gen6_update_ring_freq(dev_priv); + gen6_enable_rps(dev); + gen6_update_ring_freq(dev); } } -- cgit v1.2.3 From 87207ca20eeb519aa0333b754db9cf3c369ea6f7 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 24 Jun 2012 20:51:36 +0200 Subject: drm/i915: don't use dev->agp This single leftover use is due to a patch that went into 3.5 through -fixes. With the fake agp stuff on demise, at least for gen6+ we can't use this any more. Reviewed-by: Eugeni Dodonov Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 9563ab8390e7..216651917fd5 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1414,7 +1414,7 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) if (!ap) return; - ap->ranges[0].base = dev_priv->dev->agp->base; + ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr; ap->ranges[0].size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; primary = -- cgit v1.2.3 From 01a06850fb45ace55ed67d1d9da2df553a041e40 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 25 Jun 2012 15:58:49 +0200 Subject: drm/i915: disable drm agp support for !gen3 with kms enabled This is the quick&dirty way Dave Airlie suggested to workaround the midlayer drm agp brain-damange. Note that i915_probe is only called when the driver has ksm enabled, so no need to check for that. We also need to move the intel_agp_enabled check at the right place. Note that the only thing this does is enforce the correct module load order (by using a symbol from intel-agp.ko) to ensure that the fake agp driver is ready before the drm core tries to set up the agp stuff. 
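Sketched out, the mechanism is a probe-time check (a condensed version of the hunk below; the existing function-0 filtering is omitted, and intel_agp_enabled is the symbol exported by intel-agp.ko whose mere reference enforces the load order):

    extern int intel_agp_enabled;   /* from intel-agp.ko, forces module order */

    static int __devinit i915_pci_probe(struct pci_dev *pdev,
                                        const struct pci_device_id *ent)
    {
            struct intel_device_info *intel_info =
                    (struct intel_device_info *) ent->driver_data;

            if (intel_info->gen != 3) {
                    /* everything except gen3 drops the drm agp midlayer */
                    driver.driver_features &=
                            ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
            } else if (!intel_agp_enabled) {
                    /* gen3 still needs the legacy fake agp layer */
                    DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
                    return -ENODEV;
            }

            return drm_get_pci_dev(pdev, ent, &driver);
    }
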
v2: Add a comment to explain why gen3 needs all this legacy fake agp stuff - we've shipped an XvMC library with a kms-enabled ddx that requires it (but only on gen3). v3: Make it clear that this is only a gen3 issue in the comment. Reviewed-by: Chris Wilson Reviewed-by: Eugeni Dodonov Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index a378c0800304..79be8799ea6c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -930,10 +930,12 @@ int i915_reset(struct drm_device *dev) return 0; } - static int __devinit i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct intel_device_info *intel_info = + (struct intel_device_info *) ent->driver_data; + /* Only bind to function 0 of the device. Early generations * used function 1 as a placeholder for multi-head. This causes * us confusion instead, especially on the systems where both @@ -942,6 +944,18 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (PCI_FUNC(pdev->devfn)) return -ENODEV; + /* We've managed to ship a kms-enabled ddx that shipped with an XvMC + * implementation for gen3 (and only gen3) that used legacy drm maps + * (gasp!) to share buffers between X and the client. Hence we need to + * keep around the fake agp stuff for gen3, even when kms is enabled. */ + if (intel_info->gen != 3) { + driver.driver_features &= + ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP); + } else if (!intel_agp_enabled) { + DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); + return -ENODEV; + } + return drm_get_pci_dev(pdev, ent, &driver); } @@ -1102,11 +1116,6 @@ static struct pci_driver i915_pci_driver = { static int __init i915_init(void) { - if (!intel_agp_enabled) { - DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); - return -ENODEV; - } - driver.num_ioctls = i915_max_ioctl; /* -- cgit v1.2.3 From 25c0af768c679e44587f4cf66934d00b84d70061 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 24 Jun 2012 20:51:38 +0200 Subject: agp/intel-agp: remove snb+ host bridge pciids drm/i915 now takes care itself of setting up the gtt for these chips. Reviewed-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/char/agp/intel-agp.c | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 92622d44e12d..b130df0a1958 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -903,17 +903,6 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), - ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB), - ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB), - ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB), - ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB), - ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB), - ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB), - ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB), - ID(PCI_DEVICE_ID_INTEL_HASWELL_HB), - ID(PCI_DEVICE_ID_INTEL_HASWELL_M_HB), - ID(PCI_DEVICE_ID_INTEL_HASWELL_S_HB), - ID(PCI_DEVICE_ID_INTEL_HASWELL_E_HB), { } }; -- cgit v1.2.3 From 0317c6cecd3d6cfd8f920544239a0ba4d3c3f458 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 27 Jun 2012 12:10:30 +0300 Subject: drm/i915/bios: cleanup return type of intel_parse_bios() These are unintuitive. These are type bool and return -1 casted to true on failure. 
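For illustration only (a standalone toy, not driver code), the pitfall being described is:

    #include <stdbool.h>
    #include <stdio.h>

    static bool parse(void)
    {
            return -1;      /* meant as an error, silently converted to true */
    }

    int main(void)
    {
            if (parse())
                    printf("treated as success although -1 was an error\n");
            return 0;
    }
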
Let's just make it return an int. The callers don't care, but let's change this as a cleanup. Signed-off-by: Dan Carpenter Acked-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/gma500/intel_bios.c | 2 +- drivers/gpu/drm/gma500/intel_bios.h | 2 +- drivers/gpu/drm/i915/intel_bios.c | 2 +- drivers/gpu/drm/i915/intel_bios.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c index 973d7f6d66b7..8d7caf0f363e 100644 --- a/drivers/gpu/drm/gma500/intel_bios.c +++ b/drivers/gpu/drm/gma500/intel_bios.c @@ -427,7 +427,7 @@ parse_device_mapping(struct drm_psb_private *dev_priv, * * Returns 0 on success, nonzero on failure. */ -bool psb_intel_init_bios(struct drm_device *dev) +int psb_intel_init_bios(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct pci_dev *pdev = dev->pdev; diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h index 0a738663eb5a..2e95523b84b1 100644 --- a/drivers/gpu/drm/gma500/intel_bios.h +++ b/drivers/gpu/drm/gma500/intel_bios.h @@ -431,7 +431,7 @@ struct bdb_driver_features { u8 custom_vbt_version; } __attribute__((packed)); -extern bool psb_intel_init_bios(struct drm_device *dev); +extern int psb_intel_init_bios(struct drm_device *dev); extern void psb_intel_destroy_bios(struct drm_device *dev); /* diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 353459362f6f..8c6074154bf6 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -692,7 +692,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = { * * Returns 0 on success, nonzero on failure. */ -bool +int intel_parse_bios(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index dbda6e3bdf07..31c2107e7825 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -476,7 +476,7 @@ struct bdb_edp { } __attribute__ ((packed)); void intel_setup_bios(struct drm_device *dev); -bool intel_parse_bios(struct drm_device *dev); +int intel_parse_bios(struct drm_device *dev); /* * Driver<->VBIOS interaction occurs through scratch bits in -- cgit v1.2.3 From e86fe0d31722090e3bb72a3e8aab70f07e2c6b77 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 26 Jun 2012 13:10:11 -0700 Subject: drm/i915: mask tiled bit when updating IVB sprites Or going from tiled to untiled may break. Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sprite.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 9d7777bc1545..64174c507dd9 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -56,6 +56,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, sprctl &= ~SPRITE_PIXFORMAT_MASK; sprctl &= ~SPRITE_RGB_ORDER_RGBX; sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK; + sprctl &= ~SPRITE_TILED; switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: -- cgit v1.2.3 From f4d71056482f0c3306aa752fd626883f60deed96 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 26 Jun 2012 13:10:12 -0700 Subject: drm/i915: correct IVB default sprite format We shouldn't hit this path anyway, but make it use the IVB sprite format definition to avoid confusion. 
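Both sprite fixes (the tiled-bit masking above and this default-format correction, whose diff follows) touch the same read-modify-write sequence in ivb_update_plane(). A condensed sketch, with most cases of the format switch elided:

    sprctl = I915_READ(SPRCTL(pipe));
    sprctl &= ~SPRITE_PIXFORMAT_MASK;
    sprctl &= ~SPRITE_RGB_ORDER_RGBX;
    sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
    sprctl &= ~SPRITE_TILED;        /* clear stale tiling state (first fix) */

    switch (fb->pixel_format) {
    /* ... supported formats handled here ... */
    default:
            DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
            sprctl |= SPRITE_FORMAT_RGBX888;  /* was the DVS_ constant (second fix) */
            pixel_size = 4;
            break;
    }
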
Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sprite.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 64174c507dd9..b04109789f2e 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -85,7 +85,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, break; default: DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); - sprctl |= DVS_FORMAT_RGBX888; + sprctl |= SPRITE_FORMAT_RGBX888; pixel_size = 4; break; } -- cgit v1.2.3 From a8b0bbabf756bfb45a712b823ba41f5c95f85589 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Wed, 27 Jun 2012 00:55:37 +0200 Subject: drm/i915/sprite: Fix mem leak in intel_plane_init() If we ever hit the default case in the switch statement we'll return from the function without freeing the memory we just allocated to 'intel_plane' (but that has not been used). This patch gets rid of the leak by freeing the memory just before we return. Signed-off-by: Jesper Juhl Reviewed-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sprite.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index b04109789f2e..1a1483b924d0 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -691,6 +691,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe) break; default: + kfree(intel_plane); return -ENODEV; } @@ -705,4 +706,3 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe) return ret; } - -- cgit v1.2.3 From 97f209bcfc0c5db08d9badf8cbafd489f22a6e44 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 28 Jun 2012 09:48:42 +0200 Subject: drm/i915: "Flush Me Harder" required on gen6+ The prep to remove the flushing list in commit cc889e0f6ce6a63c62db17d702ecfed86d58083f Author: Daniel Vetter Date: Wed Jun 13 20:45:19 2012 +0200 drm/i915: disable flushing_list/gpu_write_list causes quite some decent regressions. We can fix this by setting the CS_STALL bit to ensure that the following seqno write happens only after the cache flush has completed. But only do that when the caller actually wants the flush (and not also when we invalidate caches before starting the next batch). I've looked through all our ancient scrolls about gen6+ pipe control workarounds, and this seems to be indeed a legal combination: We're allowed to set the CS_STALL bit when we flush the render cache (which we do). While yelling at this code, also pass back the return value from intel_emit_post_sync_nonzero_flush properly. v2: Instead of emitting more pipe controls, set the CS_STALL bit on the write flush as suggested by Chris Wilson. It seems to work, too. 
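Condensed, the flag selection in gen6_render_ring_flush() becomes the following (a sketch only; the write-flush bits and the actual PIPE_CONTROL emission are elided, names as in the hunk below):

    u32 flags = 0;

    /* invalidate bits for the next batch (unchanged) */
    flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
    flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
    flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

    /* Stall the CS only when the caller actually asked for a flush, so the
     * following seqno write cannot overtake the render cache flush; pure
     * invalidates before a new batch don't need the stall. */
    if (flush_domains)
            flags |= PIPE_CONTROL_CS_STALL;
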
Cc: Eric Anholt Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=51436 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=51429 Tested-by: Lu Hua Tested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index f30a53a8917e..dce4d1a492a8 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -219,7 +219,9 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, int ret; /* Force SNB workarounds for PIPE_CONTROL flushes */ - intel_emit_post_sync_nonzero_flush(ring); + ret = intel_emit_post_sync_nonzero_flush(ring); + if (ret) + return ret; /* Just flush everything. Experiments have shown that reducing the * number of bits based on the write domains has little performance @@ -233,6 +235,12 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + /* + * Ensure that any following seqno writes only happen when the render + * cache is indeed flushed (but only if the caller actually wants that). + */ + if (flush_domains) + flags |= PIPE_CONTROL_CS_STALL; ret = intel_ring_begin(ring, 6); if (ret) -- cgit v1.2.3 From e486fad9136dce21f5ba4322ae0454d23805d342 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Thu, 28 Jun 2012 15:55:48 -0300 Subject: drm/i915: fix PIPE_WM_LINETIME definition Looks like a copy/paste error. Signed-off-by: Paulo Zanoni Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9dfc4c5ff31e..ac65e96ea98e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4435,7 +4435,7 @@ #define PIPE_WM_LINETIME_B 0x45274 #define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \ PIPE_WM_LINETIME_A, \ - PIPE_WM_LINETIME_A) + PIPE_WM_LINETIME_B) #define PIPE_WM_LINETIME_MASK (0x1ff) #define PIPE_WM_LINETIME_TIME(x) ((x)) #define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16) -- cgit v1.2.3 From 0242f74d29df00ea97a6377e3c66f14efbb340d3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 28 Jun 2012 17:50:34 -0400 Subject: drm/radeon: clean up CS functions in r100.c Consolidate the CS functions to one section of the file. Previously they were spread all around. 
Signed-off-by: Alex Deucher Reviewed-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r100.c | 2983 ++++++++++++++++++++--------------------- 1 file changed, 1491 insertions(+), 1492 deletions(-) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 35825bf1e790..3fa82e1b9428 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -103,112 +103,6 @@ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ -int r100_reloc_pitch_offset(struct radeon_cs_parser *p, - struct radeon_cs_packet *pkt, - unsigned idx, - unsigned reg) -{ - int r; - u32 tile_flags = 0; - u32 tmp; - struct radeon_cs_reloc *reloc; - u32 value; - - r = r100_cs_packet_next_reloc(p, &reloc); - if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); - r100_cs_dump_packet(p, pkt); - return r; - } - - value = radeon_get_ib_value(p, idx); - tmp = value & 0x003fffff; - tmp += (((u32)reloc->lobj.gpu_offset) >> 10); - - if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) - tile_flags |= RADEON_DST_TILE_MACRO; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { - if (reg == RADEON_SRC_PITCH_OFFSET) { - DRM_ERROR("Cannot src blit from microtiled surface\n"); - r100_cs_dump_packet(p, pkt); - return -EINVAL; - } - tile_flags |= RADEON_DST_TILE_MICRO; - } - - tmp |= tile_flags; - p->ib.ptr[idx] = (value & 0x3fc00000) | tmp; - } else - p->ib.ptr[idx] = (value & 0xffc00000) | tmp; - return 0; -} - -int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, - struct radeon_cs_packet *pkt, - int idx) -{ - unsigned c, i; - struct radeon_cs_reloc *reloc; - struct r100_cs_track *track; - int r = 0; - volatile uint32_t *ib; - u32 idx_value; - - ib = p->ib.ptr; - track = (struct r100_cs_track *)p->track; - c = radeon_get_ib_value(p, idx++) & 0x1F; - if (c > 16) { - DRM_ERROR("Only 16 vertex buffers are allowed %d\n", - pkt->opcode); - r100_cs_dump_packet(p, pkt); - return -EINVAL; - } - track->num_arrays = c; - for (i = 0; i < (c - 1); i+=2, idx+=3) { - r = r100_cs_packet_next_reloc(p, &reloc); - if (r) { - DRM_ERROR("No reloc for packet3 %d\n", - pkt->opcode); - r100_cs_dump_packet(p, pkt); - return r; - } - idx_value = radeon_get_ib_value(p, idx); - ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); - - track->arrays[i + 0].esize = idx_value >> 8; - track->arrays[i + 0].robj = reloc->robj; - track->arrays[i + 0].esize &= 0x7F; - r = r100_cs_packet_next_reloc(p, &reloc); - if (r) { - DRM_ERROR("No reloc for packet3 %d\n", - pkt->opcode); - r100_cs_dump_packet(p, pkt); - return r; - } - ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset); - track->arrays[i + 1].robj = reloc->robj; - track->arrays[i + 1].esize = idx_value >> 24; - track->arrays[i + 1].esize &= 0x7F; - } - if (c & 1) { - r = r100_cs_packet_next_reloc(p, &reloc); - if (r) { - DRM_ERROR("No reloc for packet3 %d\n", - pkt->opcode); - r100_cs_dump_packet(p, pkt); - return r; - } - idx_value = radeon_get_ib_value(p, idx); - ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); - track->arrays[i + 0].robj = reloc->robj; - track->arrays[i + 0].esize = idx_value >> 8; - track->arrays[i + 0].esize &= 0x7F; - } - return r; -} - void r100_pre_page_flip(struct radeon_device *rdev, int crtc) { /* enable the pflip int */ @@ -1206,6 +1100,112 @@ void r100_cp_disable(struct radeon_device *rdev) /* * CS functions */ +int 
r100_reloc_pitch_offset(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + unsigned idx, + unsigned reg) +{ + int r; + u32 tile_flags = 0; + u32 tmp; + struct radeon_cs_reloc *reloc; + u32 value; + + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + + value = radeon_get_ib_value(p, idx); + tmp = value & 0x003fffff; + tmp += (((u32)reloc->lobj.gpu_offset) >> 10); + + if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= RADEON_DST_TILE_MACRO; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + if (reg == RADEON_SRC_PITCH_OFFSET) { + DRM_ERROR("Cannot src blit from microtiled surface\n"); + r100_cs_dump_packet(p, pkt); + return -EINVAL; + } + tile_flags |= RADEON_DST_TILE_MICRO; + } + + tmp |= tile_flags; + p->ib.ptr[idx] = (value & 0x3fc00000) | tmp; + } else + p->ib.ptr[idx] = (value & 0xffc00000) | tmp; + return 0; +} + +int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + int idx) +{ + unsigned c, i; + struct radeon_cs_reloc *reloc; + struct r100_cs_track *track; + int r = 0; + volatile uint32_t *ib; + u32 idx_value; + + ib = p->ib.ptr; + track = (struct r100_cs_track *)p->track; + c = radeon_get_ib_value(p, idx++) & 0x1F; + if (c > 16) { + DRM_ERROR("Only 16 vertex buffers are allowed %d\n", + pkt->opcode); + r100_cs_dump_packet(p, pkt); + return -EINVAL; + } + track->num_arrays = c; + for (i = 0; i < (c - 1); i+=2, idx+=3) { + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for packet3 %d\n", + pkt->opcode); + r100_cs_dump_packet(p, pkt); + return r; + } + idx_value = radeon_get_ib_value(p, idx); + ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); + + track->arrays[i + 0].esize = idx_value >> 8; + track->arrays[i + 0].robj = reloc->robj; + track->arrays[i + 0].esize &= 0x7F; + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for packet3 %d\n", + pkt->opcode); + r100_cs_dump_packet(p, pkt); + return r; + } + ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset); + track->arrays[i + 1].robj = reloc->robj; + track->arrays[i + 1].esize = idx_value >> 24; + track->arrays[i + 1].esize &= 0x7F; + } + if (c & 1) { + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for packet3 %d\n", + pkt->opcode); + r100_cs_dump_packet(p, pkt); + return r; + } + idx_value = radeon_get_ib_value(p, idx); + ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); + track->arrays[i + 0].robj = reloc->robj; + track->arrays[i + 0].esize = idx_value >> 8; + track->arrays[i + 0].esize &= 0x7F; + } + return r; +} + int r100_cs_parse_packet0(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, const unsigned *auth, unsigned n, @@ -2031,1590 +2031,1589 @@ int r100_cs_parse(struct radeon_cs_parser *p) return 0; } - -/* - * Global GPU functions - */ -void r100_errata(struct radeon_device *rdev) +static void r100_cs_track_texture_print(struct r100_cs_track_texture *t) { - rdev->pll_errata = 0; - - if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) { - rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS; - } - - if (rdev->family == CHIP_RV100 || - rdev->family == CHIP_RS100 || - rdev->family == CHIP_RS200) { - rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY; - } + DRM_ERROR("pitch %d\n", t->pitch); + DRM_ERROR("use_pitch %d\n", t->use_pitch); + 
DRM_ERROR("width %d\n", t->width); + DRM_ERROR("width_11 %d\n", t->width_11); + DRM_ERROR("height %d\n", t->height); + DRM_ERROR("height_11 %d\n", t->height_11); + DRM_ERROR("num levels %d\n", t->num_levels); + DRM_ERROR("depth %d\n", t->txdepth); + DRM_ERROR("bpp %d\n", t->cpp); + DRM_ERROR("coordinate type %d\n", t->tex_coord_type); + DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); + DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); + DRM_ERROR("compress format %d\n", t->compress_format); } -/* Wait for vertical sync on primary CRTC */ -void r100_gpu_wait_for_vsync(struct radeon_device *rdev) +static int r100_track_compress_size(int compress_format, int w, int h) { - uint32_t crtc_gen_cntl, tmp; - int i; + int block_width, block_height, block_bytes; + int wblocks, hblocks; + int min_wblocks; + int sz; - crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL); - if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) || - !(crtc_gen_cntl & RADEON_CRTC_EN)) { - return; - } - /* Clear the CRTC_VBLANK_SAVE bit */ - WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR); - for (i = 0; i < rdev->usec_timeout; i++) { - tmp = RREG32(RADEON_CRTC_STATUS); - if (tmp & RADEON_CRTC_VBLANK_SAVE) { - return; - } - DRM_UDELAY(1); + block_width = 4; + block_height = 4; + + switch (compress_format) { + case R100_TRACK_COMP_DXT1: + block_bytes = 8; + min_wblocks = 4; + break; + default: + case R100_TRACK_COMP_DXT35: + block_bytes = 16; + min_wblocks = 2; + break; } + + hblocks = (h + block_height - 1) / block_height; + wblocks = (w + block_width - 1) / block_width; + if (wblocks < min_wblocks) + wblocks = min_wblocks; + sz = wblocks * hblocks * block_bytes; + return sz; } -/* Wait for vertical sync on secondary CRTC */ -void r100_gpu_wait_for_vsync2(struct radeon_device *rdev) +static int r100_cs_track_cube(struct radeon_device *rdev, + struct r100_cs_track *track, unsigned idx) { - uint32_t crtc2_gen_cntl, tmp; - int i; + unsigned face, w, h; + struct radeon_bo *cube_robj; + unsigned long size; + unsigned compress_format = track->textures[idx].compress_format; - crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); - if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) || - !(crtc2_gen_cntl & RADEON_CRTC2_EN)) - return; + for (face = 0; face < 5; face++) { + cube_robj = track->textures[idx].cube_info[face].robj; + w = track->textures[idx].cube_info[face].width; + h = track->textures[idx].cube_info[face].height; - /* Clear the CRTC_VBLANK_SAVE bit */ - WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR); - for (i = 0; i < rdev->usec_timeout; i++) { - tmp = RREG32(RADEON_CRTC2_STATUS); - if (tmp & RADEON_CRTC2_VBLANK_SAVE) { - return; + if (compress_format) { + size = r100_track_compress_size(compress_format, w, h); + } else + size = w * h; + size *= track->textures[idx].cpp; + + size += track->textures[idx].cube_info[face].offset; + + if (size > radeon_bo_size(cube_robj)) { + DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", + size, radeon_bo_size(cube_robj)); + r100_cs_track_texture_print(&track->textures[idx]); + return -1; } - DRM_UDELAY(1); } + return 0; } -int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n) +static int r100_cs_track_texture_check(struct radeon_device *rdev, + struct r100_cs_track *track) { - unsigned i; - uint32_t tmp; + struct radeon_bo *robj; + unsigned long size; + unsigned u, i, w, h, d; + int ret; - for (i = 0; i < rdev->usec_timeout; i++) { - tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK; - if (tmp >= n) { - return 0; + 
for (u = 0; u < track->num_texture; u++) { + if (!track->textures[u].enabled) + continue; + if (track->textures[u].lookup_disable) + continue; + robj = track->textures[u].robj; + if (robj == NULL) { + DRM_ERROR("No texture bound to unit %u\n", u); + return -EINVAL; } - DRM_UDELAY(1); - } - return -1; -} + size = 0; + for (i = 0; i <= track->textures[u].num_levels; i++) { + if (track->textures[u].use_pitch) { + if (rdev->family < CHIP_R300) + w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i); + else + w = track->textures[u].pitch / (1 << i); + } else { + w = track->textures[u].width; + if (rdev->family >= CHIP_RV515) + w |= track->textures[u].width_11; + w = w / (1 << i); + if (track->textures[u].roundup_w) + w = roundup_pow_of_two(w); + } + h = track->textures[u].height; + if (rdev->family >= CHIP_RV515) + h |= track->textures[u].height_11; + h = h / (1 << i); + if (track->textures[u].roundup_h) + h = roundup_pow_of_two(h); + if (track->textures[u].tex_coord_type == 1) { + d = (1 << track->textures[u].txdepth) / (1 << i); + if (!d) + d = 1; + } else { + d = 1; + } + if (track->textures[u].compress_format) { -int r100_gui_wait_for_idle(struct radeon_device *rdev) -{ - unsigned i; - uint32_t tmp; + size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; + /* compressed textures are block based */ + } else + size += w * h * d; + } + size *= track->textures[u].cpp; - if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) { - printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !" - " Bad things might happen.\n"); - } - for (i = 0; i < rdev->usec_timeout; i++) { - tmp = RREG32(RADEON_RBBM_STATUS); - if (!(tmp & RADEON_RBBM_ACTIVE)) { - return 0; + switch (track->textures[u].tex_coord_type) { + case 0: + case 1: + break; + case 2: + if (track->separate_cube) { + ret = r100_cs_track_cube(rdev, track, u); + if (ret) + return ret; + } else + size *= 6; + break; + default: + DRM_ERROR("Invalid texture coordinate type %u for unit " + "%u\n", track->textures[u].tex_coord_type, u); + return -EINVAL; + } + if (size > radeon_bo_size(robj)) { + DRM_ERROR("Texture of unit %u needs %lu bytes but is " + "%lu\n", u, size, radeon_bo_size(robj)); + r100_cs_track_texture_print(&track->textures[u]); + return -EINVAL; } - DRM_UDELAY(1); } - return -1; + return 0; } -int r100_mc_wait_for_idle(struct radeon_device *rdev) +int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) { unsigned i; - uint32_t tmp; + unsigned long size; + unsigned prim_walk; + unsigned nverts; + unsigned num_cb = track->cb_dirty ? 
track->num_cb : 0; - for (i = 0; i < rdev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(RADEON_MC_STATUS); - if (tmp & RADEON_MC_IDLE) { - return 0; + if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && + !track->blend_read_enable) + num_cb = 0; + + for (i = 0; i < num_cb; i++) { + if (track->cb[i].robj == NULL) { + DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); + return -EINVAL; + } + size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; + size += track->cb[i].offset; + if (size > radeon_bo_size(track->cb[i].robj)) { + DRM_ERROR("[drm] Buffer too small for color buffer %d " + "(need %lu have %lu) !\n", i, size, + radeon_bo_size(track->cb[i].robj)); + DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", + i, track->cb[i].pitch, track->cb[i].cpp, + track->cb[i].offset, track->maxy); + return -EINVAL; } - DRM_UDELAY(1); } - return -1; -} - -bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) -{ - u32 rbbm_status; + track->cb_dirty = false; - rbbm_status = RREG32(R_000E40_RBBM_STATUS); - if (!G_000E40_GUI_ACTIVE(rbbm_status)) { - radeon_ring_lockup_update(ring); - return false; + if (track->zb_dirty && track->z_enabled) { + if (track->zb.robj == NULL) { + DRM_ERROR("[drm] No buffer for z buffer !\n"); + return -EINVAL; + } + size = track->zb.pitch * track->zb.cpp * track->maxy; + size += track->zb.offset; + if (size > radeon_bo_size(track->zb.robj)) { + DRM_ERROR("[drm] Buffer too small for z buffer " + "(need %lu have %lu) !\n", size, + radeon_bo_size(track->zb.robj)); + DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", + track->zb.pitch, track->zb.cpp, + track->zb.offset, track->maxy); + return -EINVAL; + } } - /* force CP activities */ - radeon_ring_force_activity(rdev, ring); - return radeon_ring_test_lockup(rdev, ring); -} + track->zb_dirty = false; -void r100_bm_disable(struct radeon_device *rdev) -{ - u32 tmp; + if (track->aa_dirty && track->aaresolve) { + if (track->aa.robj == NULL) { + DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); + return -EINVAL; + } + /* I believe the format comes from colorbuffer0. 
*/ + size = track->aa.pitch * track->cb[0].cpp * track->maxy; + size += track->aa.offset; + if (size > radeon_bo_size(track->aa.robj)) { + DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " + "(need %lu have %lu) !\n", i, size, + radeon_bo_size(track->aa.robj)); + DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", + i, track->aa.pitch, track->cb[0].cpp, + track->aa.offset, track->maxy); + return -EINVAL; + } + } + track->aa_dirty = false; - /* disable bus mastering */ - tmp = RREG32(R_000030_BUS_CNTL); - WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); - mdelay(1); - WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); - mdelay(1); - WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); - tmp = RREG32(RADEON_BUS_CNTL); - mdelay(1); - pci_clear_master(rdev->pdev); - mdelay(1); -} - -int r100_asic_reset(struct radeon_device *rdev) -{ - struct r100_mc_save save; - u32 status, tmp; - int ret = 0; - - status = RREG32(R_000E40_RBBM_STATUS); - if (!G_000E40_GUI_ACTIVE(status)) { - return 0; - } - r100_mc_stop(rdev, &save); - status = RREG32(R_000E40_RBBM_STATUS); - dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); - /* stop CP */ - WREG32(RADEON_CP_CSQ_CNTL, 0); - tmp = RREG32(RADEON_CP_RB_CNTL); - WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); - WREG32(RADEON_CP_RB_RPTR_WR, 0); - WREG32(RADEON_CP_RB_WPTR, 0); - WREG32(RADEON_CP_RB_CNTL, tmp); - /* save PCI state */ - pci_save_state(rdev->pdev); - /* disable bus mastering */ - r100_bm_disable(rdev); - WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | - S_0000F0_SOFT_RESET_RE(1) | - S_0000F0_SOFT_RESET_PP(1) | - S_0000F0_SOFT_RESET_RB(1)); - RREG32(R_0000F0_RBBM_SOFT_RESET); - mdelay(500); - WREG32(R_0000F0_RBBM_SOFT_RESET, 0); - mdelay(1); - status = RREG32(R_000E40_RBBM_STATUS); - dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); - /* reset CP */ - WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); - RREG32(R_0000F0_RBBM_SOFT_RESET); - mdelay(500); - WREG32(R_0000F0_RBBM_SOFT_RESET, 0); - mdelay(1); - status = RREG32(R_000E40_RBBM_STATUS); - dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); - /* restore PCI & busmastering */ - pci_restore_state(rdev->pdev); - r100_enable_bm(rdev); - /* Check if GPU is idle */ - if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || - G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { - dev_err(rdev->dev, "failed to reset GPU\n"); - ret = -1; - } else - dev_info(rdev->dev, "GPU reset succeed\n"); - r100_mc_resume(rdev, &save); - return ret; -} - -void r100_set_common_regs(struct radeon_device *rdev) -{ - struct drm_device *dev = rdev->ddev; - bool force_dac2 = false; - u32 tmp; - - /* set these so they don't interfere with anything */ - WREG32(RADEON_OV0_SCALE_CNTL, 0); - WREG32(RADEON_SUBPIC_CNTL, 0); - WREG32(RADEON_VIPH_CONTROL, 0); - WREG32(RADEON_I2C_CNTL_1, 0); - WREG32(RADEON_DVI_I2C_CNTL_1, 0); - WREG32(RADEON_CAP0_TRIG_CNTL, 0); - WREG32(RADEON_CAP1_TRIG_CNTL, 0); - - /* always set up dac2 on rn50 and some rv100 as lots - * of servers seem to wire it up to a VGA port but - * don't report it in the bios connector - * table. 
- */ - switch (dev->pdev->device) { - /* RN50 */ - case 0x515e: - case 0x5969: - force_dac2 = true; - break; - /* RV100*/ - case 0x5159: - case 0x515a: - /* DELL triple head servers */ - if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) && - ((dev->pdev->subsystem_device == 0x016c) || - (dev->pdev->subsystem_device == 0x016d) || - (dev->pdev->subsystem_device == 0x016e) || - (dev->pdev->subsystem_device == 0x016f) || - (dev->pdev->subsystem_device == 0x0170) || - (dev->pdev->subsystem_device == 0x017d) || - (dev->pdev->subsystem_device == 0x017e) || - (dev->pdev->subsystem_device == 0x0183) || - (dev->pdev->subsystem_device == 0x018a) || - (dev->pdev->subsystem_device == 0x019a))) - force_dac2 = true; - break; - } - - if (force_dac2) { - u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); - u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); - u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2); - - /* For CRT on DAC2, don't turn it on if BIOS didn't - enable it, even it's detected. - */ - - /* force it to crtc0 */ - dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; - dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; - disp_hw_debug |= RADEON_CRT2_DISP1_SEL; - - /* set up the TV DAC */ - tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL | - RADEON_TV_DAC_STD_MASK | - RADEON_TV_DAC_RDACPD | - RADEON_TV_DAC_GDACPD | - RADEON_TV_DAC_BDACPD | - RADEON_TV_DAC_BGADJ_MASK | - RADEON_TV_DAC_DACADJ_MASK); - tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | - RADEON_TV_DAC_NHOLD | - RADEON_TV_DAC_STD_PS2 | - (0x58 << 16)); - - WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); - WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); - WREG32(RADEON_DAC_CNTL2, dac2_cntl); + prim_walk = (track->vap_vf_cntl >> 4) & 0x3; + if (track->vap_vf_cntl & (1 << 14)) { + nverts = track->vap_alt_nverts; + } else { + nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; } - - /* switch PM block to ACPI mode */ - tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL); - tmp &= ~RADEON_PM_MODE_SEL; - WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); - -} - -/* - * VRAM info - */ -static void r100_vram_get_type(struct radeon_device *rdev) -{ - uint32_t tmp; - - rdev->mc.vram_is_ddr = false; - if (rdev->flags & RADEON_IS_IGP) - rdev->mc.vram_is_ddr = true; - else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR) - rdev->mc.vram_is_ddr = true; - if ((rdev->family == CHIP_RV100) || - (rdev->family == CHIP_RS100) || - (rdev->family == CHIP_RS200)) { - tmp = RREG32(RADEON_MEM_CNTL); - if (tmp & RV100_HALF_MODE) { - rdev->mc.vram_width = 32; - } else { - rdev->mc.vram_width = 64; + switch (prim_walk) { + case 1: + for (i = 0; i < track->num_arrays; i++) { + size = track->arrays[i].esize * track->max_indx * 4; + if (track->arrays[i].robj == NULL) { + DRM_ERROR("(PW %u) Vertex array %u no buffer " + "bound\n", prim_walk, i); + return -EINVAL; + } + if (size > radeon_bo_size(track->arrays[i].robj)) { + dev_err(rdev->dev, "(PW %u) Vertex array %u " + "need %lu dwords have %lu dwords\n", + prim_walk, i, size >> 2, + radeon_bo_size(track->arrays[i].robj) + >> 2); + DRM_ERROR("Max indices %u\n", track->max_indx); + return -EINVAL; + } } - if (rdev->flags & RADEON_SINGLE_CRTC) { - rdev->mc.vram_width /= 4; - rdev->mc.vram_is_ddr = true; + break; + case 2: + for (i = 0; i < track->num_arrays; i++) { + size = track->arrays[i].esize * (nverts - 1) * 4; + if (track->arrays[i].robj == NULL) { + DRM_ERROR("(PW %u) Vertex array %u no buffer " + "bound\n", prim_walk, i); + return -EINVAL; + } + if (size > radeon_bo_size(track->arrays[i].robj)) { + dev_err(rdev->dev, "(PW %u) Vertex array %u " + "need %lu dwords have %lu 
dwords\n", + prim_walk, i, size >> 2, + radeon_bo_size(track->arrays[i].robj) + >> 2); + return -EINVAL; + } } - } else if (rdev->family <= CHIP_RV280) { - tmp = RREG32(RADEON_MEM_CNTL); - if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) { - rdev->mc.vram_width = 128; - } else { - rdev->mc.vram_width = 64; + break; + case 3: + size = track->vtx_size * nverts; + if (size != track->immd_dwords) { + DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n", + track->immd_dwords, size); + DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", + nverts, track->vtx_size); + return -EINVAL; } - } else { - /* newer IGPs */ - rdev->mc.vram_width = 128; - } -} - -static u32 r100_get_accessible_vram(struct radeon_device *rdev) -{ - u32 aper_size; - u8 byte; - - aper_size = RREG32(RADEON_CONFIG_APER_SIZE); - - /* Set HDP_APER_CNTL only on cards that are known not to be broken, - * that is has the 2nd generation multifunction PCI interface - */ - if (rdev->family == CHIP_RV280 || - rdev->family >= CHIP_RV350) { - WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, - ~RADEON_HDP_APER_CNTL); - DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); - return aper_size * 2; + break; + default: + DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", + prim_walk); + return -EINVAL; } - /* Older cards have all sorts of funny issues to deal with. First - * check if it's a multifunction card by reading the PCI config - * header type... Limit those to one aperture size - */ - pci_read_config_byte(rdev->pdev, 0xe, &byte); - if (byte & 0x80) { - DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); - DRM_INFO("Limiting VRAM to one aperture\n"); - return aper_size; + if (track->tex_dirty) { + track->tex_dirty = false; + return r100_cs_track_texture_check(rdev, track); } - - /* Single function older card. We read HDP_APER_CNTL to see how the BIOS - * have set it up. We don't write this as it's broken on some ASICs but - * we expect the BIOS to have done the right thing (might be too optimistic...) - */ - if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) - return aper_size * 2; - return aper_size; + return 0; } -void r100_vram_init_sizes(struct radeon_device *rdev) +void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) { - u64 config_aper_size; + unsigned i, face; - /* work out accessible VRAM */ - rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); - rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); - rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev); - /* FIXME we don't use the second aperture yet when we could use it */ - if (rdev->mc.visible_vram_size > rdev->mc.aper_size) - rdev->mc.visible_vram_size = rdev->mc.aper_size; - config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); - if (rdev->flags & RADEON_IS_IGP) { - uint32_t tom; - /* read NB_TOM to get the amount of ram stolen for the GPU */ - tom = RREG32(RADEON_NB_TOM); - rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); - rdev->mc.mc_vram_size = rdev->mc.real_vram_size; - } else { - rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); - /* Some production boards of m6 will report 0 - * if it's 8 MB - */ - if (rdev->mc.real_vram_size == 0) { - rdev->mc.real_vram_size = 8192 * 1024; - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); - } - /* Fix for RN50, M6, M7 with 8/16/32(??) 
MBs of VRAM - - * Novell bug 204882 + along with lots of ubuntu ones - */ - if (rdev->mc.aper_size > config_aper_size) - config_aper_size = rdev->mc.aper_size; + track->cb_dirty = true; + track->zb_dirty = true; + track->tex_dirty = true; + track->aa_dirty = true; - if (config_aper_size > rdev->mc.real_vram_size) - rdev->mc.mc_vram_size = config_aper_size; + if (rdev->family < CHIP_R300) { + track->num_cb = 1; + if (rdev->family <= CHIP_RS200) + track->num_texture = 3; else - rdev->mc.mc_vram_size = rdev->mc.real_vram_size; - } -} - -void r100_vga_set_state(struct radeon_device *rdev, bool state) -{ - uint32_t temp; - - temp = RREG32(RADEON_CONFIG_CNTL); - if (state == false) { - temp &= ~RADEON_CFG_VGA_RAM_EN; - temp |= RADEON_CFG_VGA_IO_DIS; + track->num_texture = 6; + track->maxy = 2048; + track->separate_cube = 1; } else { - temp &= ~RADEON_CFG_VGA_IO_DIS; + track->num_cb = 4; + track->num_texture = 16; + track->maxy = 4096; + track->separate_cube = 0; + track->aaresolve = false; + track->aa.robj = NULL; } - WREG32(RADEON_CONFIG_CNTL, temp); -} - -void r100_mc_init(struct radeon_device *rdev) -{ - u64 base; - r100_vram_get_type(rdev); - r100_vram_init_sizes(rdev); - base = rdev->mc.aper_base; - if (rdev->flags & RADEON_IS_IGP) - base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; - radeon_vram_location(rdev, &rdev->mc, base); - rdev->mc.gtt_base_align = 0; - if (!(rdev->flags & RADEON_IS_AGP)) - radeon_gtt_location(rdev, &rdev->mc); - radeon_update_bandwidth_info(rdev); -} - - -/* - * Indirect registers accessor - */ -void r100_pll_errata_after_index(struct radeon_device *rdev) -{ - if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) { - (void)RREG32(RADEON_CLOCK_CNTL_DATA); - (void)RREG32(RADEON_CRTC_GEN_CNTL); + for (i = 0; i < track->num_cb; i++) { + track->cb[i].robj = NULL; + track->cb[i].pitch = 8192; + track->cb[i].cpp = 16; + track->cb[i].offset = 0; } -} - -static void r100_pll_errata_after_data(struct radeon_device *rdev) -{ - /* This workarounds is necessary on RV100, RS100 and RS200 chips - * or the chip could hang on a subsequent access - */ - if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) { - mdelay(5); + track->z_enabled = true; + track->zb.robj = NULL; + track->zb.pitch = 8192; + track->zb.cpp = 4; + track->zb.offset = 0; + track->vtx_size = 0x7F; + track->immd_dwords = 0xFFFFFFFFUL; + track->num_arrays = 11; + track->max_indx = 0x00FFFFFFUL; + for (i = 0; i < track->num_arrays; i++) { + track->arrays[i].robj = NULL; + track->arrays[i].esize = 0x7F; } - - /* This function is required to workaround a hardware bug in some (all?) - * revisions of the R300. This workaround should be called after every - * CLOCK_CNTL_INDEX register access. If not, register reads afterward - * may not be correct. 
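As the errata notes above spell out, the PLL registers sit behind an index/data pair: the register number (plus a write-enable bit for stores) goes into CLOCK_CNTL_INDEX and the payload moves through CLOCK_CNTL_DATA, which is why the accessors that follow bracket every access with the errata hooks. A self-contained sketch of the index/data idea, with a toy register file standing in for the hardware:

#include <stdint.h>
#include <stdio.h>

/* Toy model of an index/data pair: 64 PLL registers behind one data port. */
static uint32_t pll_regs[64];
static uint32_t pll_index;

#define PLL_WR_EN (1u << 7)   /* write-enable bit (illustrative value) */

static void write_index(uint32_t v)  { pll_index = v; }                     /* CLOCK_CNTL_INDEX */
static uint32_t read_data(void)      { return pll_regs[pll_index & 0x3f]; } /* CLOCK_CNTL_DATA  */
static void write_data(uint32_t v)   { pll_regs[pll_index & 0x3f] = v; }

static uint32_t pll_read(uint32_t reg)
{
        write_index(reg & 0x3f);                /* select the PLL register */
        return read_data();                     /* then read it through the data port */
}

static void pll_write(uint32_t reg, uint32_t val)
{
        write_index((reg & 0x3f) | PLL_WR_EN);  /* select with write enable */
        write_data(val);
}

int main(void)
{
        pll_write(0x2d, 0x1234);
        printf("PLL 0x2d = 0x%08x\n", pll_read(0x2d));
        return 0;
}

The driver's r100_pll_rreg()/r100_pll_wreg() do the same thing against the real MMIO registers, adding the dummy-read and delay workarounds for the affected families.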
- */ - if (rdev->pll_errata & CHIP_ERRATA_R300_CG) { - uint32_t save, tmp; - - save = RREG32(RADEON_CLOCK_CNTL_INDEX); - tmp = save & ~(0x3f | RADEON_PLL_WR_EN); - WREG32(RADEON_CLOCK_CNTL_INDEX, tmp); - tmp = RREG32(RADEON_CLOCK_CNTL_DATA); - WREG32(RADEON_CLOCK_CNTL_INDEX, save); + for (i = 0; i < track->num_texture; i++) { + track->textures[i].compress_format = R100_TRACK_COMP_NONE; + track->textures[i].pitch = 16536; + track->textures[i].width = 16536; + track->textures[i].height = 16536; + track->textures[i].width_11 = 1 << 11; + track->textures[i].height_11 = 1 << 11; + track->textures[i].num_levels = 12; + if (rdev->family <= CHIP_RS200) { + track->textures[i].tex_coord_type = 0; + track->textures[i].txdepth = 0; + } else { + track->textures[i].txdepth = 16; + track->textures[i].tex_coord_type = 1; + } + track->textures[i].cpp = 64; + track->textures[i].robj = NULL; + /* CS IB emission code makes sure texture unit are disabled */ + track->textures[i].enabled = false; + track->textures[i].lookup_disable = false; + track->textures[i].roundup_w = true; + track->textures[i].roundup_h = true; + if (track->separate_cube) + for (face = 0; face < 5; face++) { + track->textures[i].cube_info[face].robj = NULL; + track->textures[i].cube_info[face].width = 16536; + track->textures[i].cube_info[face].height = 16536; + track->textures[i].cube_info[face].offset = 0; + } } } -uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) +/* + * Global GPU functions + */ +void r100_errata(struct radeon_device *rdev) { - uint32_t data; + rdev->pll_errata = 0; - WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); - r100_pll_errata_after_index(rdev); - data = RREG32(RADEON_CLOCK_CNTL_DATA); - r100_pll_errata_after_data(rdev); - return data; -} + if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) { + rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS; + } -void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) -{ - WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); - r100_pll_errata_after_index(rdev); - WREG32(RADEON_CLOCK_CNTL_DATA, v); - r100_pll_errata_after_data(rdev); + if (rdev->family == CHIP_RV100 || + rdev->family == CHIP_RS100 || + rdev->family == CHIP_RS200) { + rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY; + } } -void r100_set_safe_registers(struct radeon_device *rdev) +/* Wait for vertical sync on primary CRTC */ +void r100_gpu_wait_for_vsync(struct radeon_device *rdev) { - if (ASIC_IS_RN50(rdev)) { - rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; - rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm); - } else if (rdev->family < CHIP_R200) { - rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; - rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); - } else { - r200_set_safe_registers(rdev); + uint32_t crtc_gen_cntl, tmp; + int i; + + crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL); + if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) || + !(crtc_gen_cntl & RADEON_CRTC_EN)) { + return; + } + /* Clear the CRTC_VBLANK_SAVE bit */ + WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR); + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = RREG32(RADEON_CRTC_STATUS); + if (tmp & RADEON_CRTC_VBLANK_SAVE) { + return; + } + DRM_UDELAY(1); } } -/* - * Debugfs info - */ -#if defined(CONFIG_DEBUG_FS) -static int r100_debugfs_rbbm_info(struct seq_file *m, void *data) +/* Wait for vertical sync on secondary CRTC */ +void r100_gpu_wait_for_vsync2(struct radeon_device *rdev) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - 
struct drm_device *dev = node->minor->dev; - struct radeon_device *rdev = dev->dev_private; - uint32_t reg, value; - unsigned i; + uint32_t crtc2_gen_cntl, tmp; + int i; - seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS)); - seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C)); - seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); - for (i = 0; i < 64; i++) { - WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100); - reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2; - WREG32(RADEON_RBBM_CMDFIFO_ADDR, i); - value = RREG32(RADEON_RBBM_CMDFIFO_DATA); - seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value); + crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); + if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) || + !(crtc2_gen_cntl & RADEON_CRTC2_EN)) + return; + + /* Clear the CRTC_VBLANK_SAVE bit */ + WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR); + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = RREG32(RADEON_CRTC2_STATUS); + if (tmp & RADEON_CRTC2_VBLANK_SAVE) { + return; + } + DRM_UDELAY(1); } - return 0; } -static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) +int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - uint32_t rdp, wdp; - unsigned count, i, j; + unsigned i; + uint32_t tmp; - radeon_ring_free_size(rdev, ring); - rdp = RREG32(RADEON_CP_RB_RPTR); - wdp = RREG32(RADEON_CP_RB_WPTR); - count = (rdp + ring->ring_size - wdp) & ring->ptr_mask; - seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); - seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); - seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); - seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); - seq_printf(m, "%u dwords in ring\n", count); - for (j = 0; j <= count; j++) { - i = (rdp + j) & ring->ptr_mask; - seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK; + if (tmp >= n) { + return 0; + } + DRM_UDELAY(1); } - return 0; + return -1; } - -static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data) +int r100_gui_wait_for_idle(struct radeon_device *rdev) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct radeon_device *rdev = dev->dev_private; - uint32_t csq_stat, csq2_stat, tmp; - unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr; unsigned i; + uint32_t tmp; - seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); - seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE)); - csq_stat = RREG32(RADEON_CP_CSQ_STAT); - csq2_stat = RREG32(RADEON_CP_CSQ2_STAT); - r_rptr = (csq_stat >> 0) & 0x3ff; - r_wptr = (csq_stat >> 10) & 0x3ff; - ib1_rptr = (csq_stat >> 20) & 0x3ff; - ib1_wptr = (csq2_stat >> 0) & 0x3ff; - ib2_rptr = (csq2_stat >> 10) & 0x3ff; - ib2_wptr = (csq2_stat >> 20) & 0x3ff; - seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat); - seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat); - seq_printf(m, "Ring rptr %u\n", r_rptr); - seq_printf(m, "Ring wptr %u\n", r_wptr); - seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr); - seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr); - seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr); - seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr); - /* FIXME: 0, 128, 640 depends on fifo 
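The vsync waits and the RBBM/GUI/MC idle waits in this hunk all share one shape: poll a status register once per microsecond until the interesting bit flips, and bail out after rdev->usec_timeout iterations. A generic, stand-alone sketch of that pattern (the fake status source is purely for demonstration):

#include <stdint.h>
#include <stdio.h>

/* Poll a status source until (status & mask) == want, giving up after
 * 'timeout' polls; the r100_*_wait_* helpers poll once per microsecond. */
static int wait_for_bits(uint32_t (*read_status)(void), uint32_t mask,
                         uint32_t want, unsigned timeout)
{
        unsigned i;

        for (i = 0; i < timeout; i++) {
                if ((read_status() & mask) == want)
                        return 0;              /* condition met */
                /* a real caller would delay ~1us here, as DRM_UDELAY(1) does */
        }
        return -1;                             /* timed out */
}

/* Toy status source: reports "busy" (bit 31 set) for the first few polls. */
static uint32_t fake_status(void)
{
        static unsigned calls;
        return (++calls > 3) ? 0x0u : 0x80000000u;
}

int main(void)
{
        printf("wait result: %d\n",
               wait_for_bits(fake_status, 0x80000000u, 0, 10));
        return 0;
}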
setup see cp_init_kms - * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */ - seq_printf(m, "Ring fifo:\n"); - for (i = 0; i < 256; i++) { - WREG32(RADEON_CP_CSQ_ADDR, i << 2); - tmp = RREG32(RADEON_CP_CSQ_DATA); - seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp); - } - seq_printf(m, "Indirect1 fifo:\n"); - for (i = 256; i <= 512; i++) { - WREG32(RADEON_CP_CSQ_ADDR, i << 2); - tmp = RREG32(RADEON_CP_CSQ_DATA); - seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp); + if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) { + printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !" + " Bad things might happen.\n"); } - seq_printf(m, "Indirect2 fifo:\n"); - for (i = 640; i < ib1_wptr; i++) { - WREG32(RADEON_CP_CSQ_ADDR, i << 2); - tmp = RREG32(RADEON_CP_CSQ_DATA); - seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp); + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = RREG32(RADEON_RBBM_STATUS); + if (!(tmp & RADEON_RBBM_ACTIVE)) { + return 0; + } + DRM_UDELAY(1); } - return 0; + return -1; } -static int r100_debugfs_mc_info(struct seq_file *m, void *data) +int r100_mc_wait_for_idle(struct radeon_device *rdev) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct radeon_device *rdev = dev->dev_private; + unsigned i; uint32_t tmp; - tmp = RREG32(RADEON_CONFIG_MEMSIZE); - seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp); - tmp = RREG32(RADEON_MC_FB_LOCATION); - seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp); - tmp = RREG32(RADEON_BUS_CNTL); - seq_printf(m, "BUS_CNTL 0x%08x\n", tmp); - tmp = RREG32(RADEON_MC_AGP_LOCATION); - seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp); - tmp = RREG32(RADEON_AGP_BASE); - seq_printf(m, "AGP_BASE 0x%08x\n", tmp); - tmp = RREG32(RADEON_HOST_PATH_CNTL); - seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp); - tmp = RREG32(0x01D0); - seq_printf(m, "AIC_CTRL 0x%08x\n", tmp); - tmp = RREG32(RADEON_AIC_LO_ADDR); - seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp); - tmp = RREG32(RADEON_AIC_HI_ADDR); - seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp); - tmp = RREG32(0x01E4); - seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp); - return 0; + for (i = 0; i < rdev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(RADEON_MC_STATUS); + if (tmp & RADEON_MC_IDLE) { + return 0; + } + DRM_UDELAY(1); + } + return -1; } -static struct drm_info_list r100_debugfs_rbbm_list[] = { - {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL}, -}; - -static struct drm_info_list r100_debugfs_cp_list[] = { - {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL}, - {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL}, -}; - -static struct drm_info_list r100_debugfs_mc_info_list[] = { - {"r100_mc_info", r100_debugfs_mc_info, 0, NULL}, -}; -#endif - -int r100_debugfs_rbbm_init(struct radeon_device *rdev) +bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) { -#if defined(CONFIG_DEBUG_FS) - return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1); -#else - return 0; -#endif -} + u32 rbbm_status; -int r100_debugfs_cp_init(struct radeon_device *rdev) -{ -#if defined(CONFIG_DEBUG_FS) - return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2); -#else - return 0; -#endif + rbbm_status = RREG32(R_000E40_RBBM_STATUS); + if (!G_000E40_GUI_ACTIVE(rbbm_status)) { + radeon_ring_lockup_update(ring); + return false; + } + /* force CP activities */ + radeon_ring_force_activity(rdev, ring); + return radeon_ring_test_lockup(rdev, ring); } -int r100_debugfs_mc_info_init(struct radeon_device *rdev) +void 
r100_bm_disable(struct radeon_device *rdev) { -#if defined(CONFIG_DEBUG_FS) - return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1); -#else - return 0; -#endif + u32 tmp; + + /* disable bus mastering */ + tmp = RREG32(R_000030_BUS_CNTL); + WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044); + mdelay(1); + WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042); + mdelay(1); + WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); + tmp = RREG32(RADEON_BUS_CNTL); + mdelay(1); + pci_clear_master(rdev->pdev); + mdelay(1); } -int r100_set_surface_reg(struct radeon_device *rdev, int reg, - uint32_t tiling_flags, uint32_t pitch, - uint32_t offset, uint32_t obj_size) +int r100_asic_reset(struct radeon_device *rdev) { - int surf_index = reg * 16; - int flags = 0; + struct r100_mc_save save; + u32 status, tmp; + int ret = 0; - if (rdev->family <= CHIP_RS200) { - if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) - == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) - flags |= RADEON_SURF_TILE_COLOR_BOTH; - if (tiling_flags & RADEON_TILING_MACRO) - flags |= RADEON_SURF_TILE_COLOR_MACRO; - } else if (rdev->family <= CHIP_RV280) { - if (tiling_flags & (RADEON_TILING_MACRO)) - flags |= R200_SURF_TILE_COLOR_MACRO; - if (tiling_flags & RADEON_TILING_MICRO) - flags |= R200_SURF_TILE_COLOR_MICRO; - } else { - if (tiling_flags & RADEON_TILING_MACRO) - flags |= R300_SURF_TILE_MACRO; - if (tiling_flags & RADEON_TILING_MICRO) - flags |= R300_SURF_TILE_MICRO; + status = RREG32(R_000E40_RBBM_STATUS); + if (!G_000E40_GUI_ACTIVE(status)) { + return 0; } + r100_mc_stop(rdev, &save); + status = RREG32(R_000E40_RBBM_STATUS); + dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); + /* stop CP */ + WREG32(RADEON_CP_CSQ_CNTL, 0); + tmp = RREG32(RADEON_CP_RB_CNTL); + WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); + WREG32(RADEON_CP_RB_RPTR_WR, 0); + WREG32(RADEON_CP_RB_WPTR, 0); + WREG32(RADEON_CP_RB_CNTL, tmp); + /* save PCI state */ + pci_save_state(rdev->pdev); + /* disable bus mastering */ + r100_bm_disable(rdev); + WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) | + S_0000F0_SOFT_RESET_RE(1) | + S_0000F0_SOFT_RESET_PP(1) | + S_0000F0_SOFT_RESET_RB(1)); + RREG32(R_0000F0_RBBM_SOFT_RESET); + mdelay(500); + WREG32(R_0000F0_RBBM_SOFT_RESET, 0); + mdelay(1); + status = RREG32(R_000E40_RBBM_STATUS); + dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); + /* reset CP */ + WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); + RREG32(R_0000F0_RBBM_SOFT_RESET); + mdelay(500); + WREG32(R_0000F0_RBBM_SOFT_RESET, 0); + mdelay(1); + status = RREG32(R_000E40_RBBM_STATUS); + dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); + /* restore PCI & busmastering */ + pci_restore_state(rdev->pdev); + r100_enable_bm(rdev); + /* Check if GPU is idle */ + if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) || + G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { + dev_err(rdev->dev, "failed to reset GPU\n"); + ret = -1; + } else + dev_info(rdev->dev, "GPU reset succeed\n"); + r100_mc_resume(rdev, &save); + return ret; +} - if (tiling_flags & RADEON_TILING_SWAP_16BIT) - flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP; - if (tiling_flags & RADEON_TILING_SWAP_32BIT) - flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; - - /* when we aren't tiling the pitch seems to needs to be furtherdivided down. 
- tested on power5 + rn50 server */ - if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) { - if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO))) - if (ASIC_IS_RN50(rdev)) - pitch /= 16; - } +void r100_set_common_regs(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + bool force_dac2 = false; + u32 tmp; - /* r100/r200 divide by 16 */ - if (rdev->family < CHIP_R300) - flags |= pitch / 16; - else - flags |= pitch / 8; + /* set these so they don't interfere with anything */ + WREG32(RADEON_OV0_SCALE_CNTL, 0); + WREG32(RADEON_SUBPIC_CNTL, 0); + WREG32(RADEON_VIPH_CONTROL, 0); + WREG32(RADEON_I2C_CNTL_1, 0); + WREG32(RADEON_DVI_I2C_CNTL_1, 0); + WREG32(RADEON_CAP0_TRIG_CNTL, 0); + WREG32(RADEON_CAP1_TRIG_CNTL, 0); + /* always set up dac2 on rn50 and some rv100 as lots + * of servers seem to wire it up to a VGA port but + * don't report it in the bios connector + * table. + */ + switch (dev->pdev->device) { + /* RN50 */ + case 0x515e: + case 0x5969: + force_dac2 = true; + break; + /* RV100*/ + case 0x5159: + case 0x515a: + /* DELL triple head servers */ + if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) && + ((dev->pdev->subsystem_device == 0x016c) || + (dev->pdev->subsystem_device == 0x016d) || + (dev->pdev->subsystem_device == 0x016e) || + (dev->pdev->subsystem_device == 0x016f) || + (dev->pdev->subsystem_device == 0x0170) || + (dev->pdev->subsystem_device == 0x017d) || + (dev->pdev->subsystem_device == 0x017e) || + (dev->pdev->subsystem_device == 0x0183) || + (dev->pdev->subsystem_device == 0x018a) || + (dev->pdev->subsystem_device == 0x019a))) + force_dac2 = true; + break; + } - DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); - WREG32(RADEON_SURFACE0_INFO + surf_index, flags); - WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); - WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); - return 0; -} + if (force_dac2) { + u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); + u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); + u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2); -void r100_clear_surface_reg(struct radeon_device *rdev, int reg) -{ - int surf_index = reg * 16; - WREG32(RADEON_SURFACE0_INFO + surf_index, 0); -} + /* For CRT on DAC2, don't turn it on if BIOS didn't + enable it, even it's detected. 
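The pitch handling in r100_set_surface_reg above reduces to a small amount of scaling before the value is or'ed into the low bits of SURFACE0_INFO: r100/r200-class parts store pitch/16, r300 and later store pitch/8, and untiled byte-swapped surfaces on RN50 need an extra divide by 16 first. A stand-alone restatement of that scaling (the helper name and flags are illustrative, not driver API):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative pitch scaling mirroring r100_set_surface_reg. */
static uint32_t surface_pitch_bits(uint32_t pitch_bytes, bool r300_or_newer,
                                   bool rn50_swapped_untiled)
{
        if (rn50_swapped_untiled)
                pitch_bytes /= 16;     /* extra scaling observed on RN50 servers */
        return r300_or_newer ? pitch_bytes / 8 : pitch_bytes / 16;
}

int main(void)
{
        /* e.g. a 1024-pixel-wide, 32bpp surface: pitch = 4096 bytes */
        printf("r100/r200 field: %u, r300+ field: %u\n",
               surface_pitch_bits(4096, false, false),
               surface_pitch_bits(4096, true, false));
        return 0;
}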
+ */ -void r100_bandwidth_update(struct radeon_device *rdev) -{ - fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; - fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; - fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; - uint32_t temp, data, mem_trcd, mem_trp, mem_tras; - fixed20_12 memtcas_ff[8] = { - dfixed_init(1), - dfixed_init(2), - dfixed_init(3), - dfixed_init(0), - dfixed_init_half(1), - dfixed_init_half(2), - dfixed_init(0), - }; - fixed20_12 memtcas_rs480_ff[8] = { - dfixed_init(0), - dfixed_init(1), - dfixed_init(2), - dfixed_init(3), - dfixed_init(0), - dfixed_init_half(1), - dfixed_init_half(2), - dfixed_init_half(3), - }; - fixed20_12 memtcas2_ff[8] = { - dfixed_init(0), - dfixed_init(1), - dfixed_init(2), - dfixed_init(3), - dfixed_init(4), - dfixed_init(5), - dfixed_init(6), - dfixed_init(7), - }; - fixed20_12 memtrbs[8] = { - dfixed_init(1), - dfixed_init_half(1), - dfixed_init(2), - dfixed_init_half(2), - dfixed_init(3), - dfixed_init_half(3), - dfixed_init(4), - dfixed_init_half(4) - }; - fixed20_12 memtrbs_r4xx[8] = { - dfixed_init(4), - dfixed_init(5), - dfixed_init(6), - dfixed_init(7), - dfixed_init(8), - dfixed_init(9), - dfixed_init(10), - dfixed_init(11) - }; - fixed20_12 min_mem_eff; - fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; - fixed20_12 cur_latency_mclk, cur_latency_sclk; - fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, - disp_drain_rate2, read_return_rate; - fixed20_12 time_disp1_drop_priority; - int c; - int cur_size = 16; /* in octawords */ - int critical_point = 0, critical_point2; -/* uint32_t read_return_rate, time_disp1_drop_priority; */ - int stop_req, max_stop_req; - struct drm_display_mode *mode1 = NULL; - struct drm_display_mode *mode2 = NULL; - uint32_t pixel_bytes1 = 0; - uint32_t pixel_bytes2 = 0; + /* force it to crtc0 */ + dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; + dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; + disp_hw_debug |= RADEON_CRT2_DISP1_SEL; - radeon_update_display_priority(rdev); + /* set up the TV DAC */ + tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL | + RADEON_TV_DAC_STD_MASK | + RADEON_TV_DAC_RDACPD | + RADEON_TV_DAC_GDACPD | + RADEON_TV_DAC_BDACPD | + RADEON_TV_DAC_BGADJ_MASK | + RADEON_TV_DAC_DACADJ_MASK); + tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | + RADEON_TV_DAC_NHOLD | + RADEON_TV_DAC_STD_PS2 | + (0x58 << 16)); - if (rdev->mode_info.crtcs[0]->base.enabled) { - mode1 = &rdev->mode_info.crtcs[0]->base.mode; - pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; + WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); + WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); + WREG32(RADEON_DAC_CNTL2, dac2_cntl); } - if (!(rdev->flags & RADEON_SINGLE_CRTC)) { - if (rdev->mode_info.crtcs[1]->base.enabled) { - mode2 = &rdev->mode_info.crtcs[1]->base.mode; - pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; + + /* switch PM block to ACPI mode */ + tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL); + tmp &= ~RADEON_PM_MODE_SEL; + WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); + +} + +/* + * VRAM info + */ +static void r100_vram_get_type(struct radeon_device *rdev) +{ + uint32_t tmp; + + rdev->mc.vram_is_ddr = false; + if (rdev->flags & RADEON_IS_IGP) + rdev->mc.vram_is_ddr = true; + else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR) + rdev->mc.vram_is_ddr = true; + if ((rdev->family == CHIP_RV100) || + (rdev->family == CHIP_RS100) || + (rdev->family == CHIP_RS200)) { + tmp = RREG32(RADEON_MEM_CNTL); + if (tmp & RV100_HALF_MODE) { + rdev->mc.vram_width = 32; + } else { + 
rdev->mc.vram_width = 64; + } + if (rdev->flags & RADEON_SINGLE_CRTC) { + rdev->mc.vram_width /= 4; + rdev->mc.vram_is_ddr = true; } + } else if (rdev->family <= CHIP_RV280) { + tmp = RREG32(RADEON_MEM_CNTL); + if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) { + rdev->mc.vram_width = 128; + } else { + rdev->mc.vram_width = 64; + } + } else { + /* newer IGPs */ + rdev->mc.vram_width = 128; } +} - min_mem_eff.full = dfixed_const_8(0); - /* get modes */ - if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { - uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); - mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); - mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); - /* check crtc enables */ - if (mode2) - mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); - if (mode1) - mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); - WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); +static u32 r100_get_accessible_vram(struct radeon_device *rdev) +{ + u32 aper_size; + u8 byte; + + aper_size = RREG32(RADEON_CONFIG_APER_SIZE); + + /* Set HDP_APER_CNTL only on cards that are known not to be broken, + * that is has the 2nd generation multifunction PCI interface + */ + if (rdev->family == CHIP_RV280 || + rdev->family >= CHIP_RV350) { + WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, + ~RADEON_HDP_APER_CNTL); + DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); + return aper_size * 2; } - /* - * determine is there is enough bw for current mode + /* Older cards have all sorts of funny issues to deal with. First + * check if it's a multifunction card by reading the PCI config + * header type... Limit those to one aperture size */ - sclk_ff = rdev->pm.sclk; - mclk_ff = rdev->pm.mclk; + pci_read_config_byte(rdev->pdev, 0xe, &byte); + if (byte & 0x80) { + DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); + DRM_INFO("Limiting VRAM to one aperture\n"); + return aper_size; + } - temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); - temp_ff.full = dfixed_const(temp); - mem_bw.full = dfixed_mul(mclk_ff, temp_ff); + /* Single function older card. We read HDP_APER_CNTL to see how the BIOS + * have set it up. We don't write this as it's broken on some ASICs but + * we expect the BIOS to have done the right thing (might be too optimistic...) 
+ */ + if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) + return aper_size * 2; + return aper_size; +} - pix_clk.full = 0; - pix_clk2.full = 0; - peak_disp_bw.full = 0; - if (mode1) { - temp_ff.full = dfixed_const(1000); - pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ - pix_clk.full = dfixed_div(pix_clk, temp_ff); - temp_ff.full = dfixed_const(pixel_bytes1); - peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); - } - if (mode2) { - temp_ff.full = dfixed_const(1000); - pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ - pix_clk2.full = dfixed_div(pix_clk2, temp_ff); - temp_ff.full = dfixed_const(pixel_bytes2); - peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); - } +void r100_vram_init_sizes(struct radeon_device *rdev) +{ + u64 config_aper_size; - mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); - if (peak_disp_bw.full >= mem_bw.full) { - DRM_ERROR("You may not have enough display bandwidth for current mode\n" - "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); + /* work out accessible VRAM */ + rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); + rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); + rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev); + /* FIXME we don't use the second aperture yet when we could use it */ + if (rdev->mc.visible_vram_size > rdev->mc.aper_size) + rdev->mc.visible_vram_size = rdev->mc.aper_size; + config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); + if (rdev->flags & RADEON_IS_IGP) { + uint32_t tom; + /* read NB_TOM to get the amount of ram stolen for the GPU */ + tom = RREG32(RADEON_NB_TOM); + rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); + WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; + } else { + rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); + /* Some production boards of m6 will report 0 + * if it's 8 MB + */ + if (rdev->mc.real_vram_size == 0) { + rdev->mc.real_vram_size = 8192 * 1024; + WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); + } + /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - + * Novell bug 204882 + along with lots of ubuntu ones + */ + if (rdev->mc.aper_size > config_aper_size) + config_aper_size = rdev->mc.aper_size; + + if (config_aper_size > rdev->mc.real_vram_size) + rdev->mc.mc_vram_size = config_aper_size; + else + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; } +} - /* Get values from the EXT_MEM_CNTL register...converting its contents. 
*/ - temp = RREG32(RADEON_MEM_TIMING_CNTL); - if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ - mem_trcd = ((temp >> 2) & 0x3) + 1; - mem_trp = ((temp & 0x3)) + 1; - mem_tras = ((temp & 0x70) >> 4) + 1; - } else if (rdev->family == CHIP_R300 || - rdev->family == CHIP_R350) { /* r300, r350 */ - mem_trcd = (temp & 0x7) + 1; - mem_trp = ((temp >> 8) & 0x7) + 1; - mem_tras = ((temp >> 11) & 0xf) + 4; - } else if (rdev->family == CHIP_RV350 || - rdev->family <= CHIP_RV380) { - /* rv3x0 */ - mem_trcd = (temp & 0x7) + 3; - mem_trp = ((temp >> 8) & 0x7) + 3; - mem_tras = ((temp >> 11) & 0xf) + 6; - } else if (rdev->family == CHIP_R420 || - rdev->family == CHIP_R423 || - rdev->family == CHIP_RV410) { - /* r4xx */ - mem_trcd = (temp & 0xf) + 3; - if (mem_trcd > 15) - mem_trcd = 15; - mem_trp = ((temp >> 8) & 0xf) + 3; - if (mem_trp > 15) - mem_trp = 15; - mem_tras = ((temp >> 12) & 0x1f) + 6; - if (mem_tras > 31) - mem_tras = 31; - } else { /* RV200, R200 */ - mem_trcd = (temp & 0x7) + 1; - mem_trp = ((temp >> 8) & 0x7) + 1; - mem_tras = ((temp >> 12) & 0xf) + 4; +void r100_vga_set_state(struct radeon_device *rdev, bool state) +{ + uint32_t temp; + + temp = RREG32(RADEON_CONFIG_CNTL); + if (state == false) { + temp &= ~RADEON_CFG_VGA_RAM_EN; + temp |= RADEON_CFG_VGA_IO_DIS; + } else { + temp &= ~RADEON_CFG_VGA_IO_DIS; } - /* convert to FF */ - trcd_ff.full = dfixed_const(mem_trcd); - trp_ff.full = dfixed_const(mem_trp); - tras_ff.full = dfixed_const(mem_tras); + WREG32(RADEON_CONFIG_CNTL, temp); +} - /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ - temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); - data = (temp & (7 << 20)) >> 20; - if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { - if (rdev->family == CHIP_RS480) /* don't think rs400 */ - tcas_ff = memtcas_rs480_ff[data]; - else - tcas_ff = memtcas_ff[data]; - } else - tcas_ff = memtcas2_ff[data]; +void r100_mc_init(struct radeon_device *rdev) +{ + u64 base; - if (rdev->family == CHIP_RS400 || - rdev->family == CHIP_RS480) { - /* extra cas latency stored in bits 23-25 0-4 clocks */ - data = (temp >> 23) & 0x7; - if (data < 5) - tcas_ff.full += dfixed_const(data); + r100_vram_get_type(rdev); + r100_vram_init_sizes(rdev); + base = rdev->mc.aper_base; + if (rdev->flags & RADEON_IS_IGP) + base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; + radeon_vram_location(rdev, &rdev->mc, base); + rdev->mc.gtt_base_align = 0; + if (!(rdev->flags & RADEON_IS_AGP)) + radeon_gtt_location(rdev, &rdev->mc); + radeon_update_bandwidth_info(rdev); +} + + +/* + * Indirect registers accessor + */ +void r100_pll_errata_after_index(struct radeon_device *rdev) +{ + if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) { + (void)RREG32(RADEON_CLOCK_CNTL_DATA); + (void)RREG32(RADEON_CRTC_GEN_CNTL); } +} - if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { - /* on the R300, Tcas is included in Trbs. 
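The block above pulls tRCD, tRP and tRAS out of MEM_TIMING_CNTL with per-family bit positions and offsets (for instance +1/+1/+1 clocks on RV100-class parts and +3/+3/+6 on RV3x0). Worked through for the RV3x0 layout with an arbitrary register value, purely as an illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t temp = 0x00002a05;  /* arbitrary MEM_TIMING_CNTL value, for illustration */

        /* rv3x0 layout as decoded above: trcd in [2:0], trp in [10:8], tras in [14:11] */
        unsigned mem_trcd = (temp & 0x7) + 3;
        unsigned mem_trp  = ((temp >> 8) & 0x7) + 3;
        unsigned mem_tras = ((temp >> 11) & 0xf) + 6;

        printf("tRCD=%u tRP=%u tRAS=%u memory clocks\n", mem_trcd, mem_trp, mem_tras);
        return 0;
}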
- */ - temp = RREG32(RADEON_MEM_CNTL); - data = (R300_MEM_NUM_CHANNELS_MASK & temp); - if (data == 1) { - if (R300_MEM_USE_CD_CH_ONLY & temp) { - temp = RREG32(R300_MC_IND_INDEX); - temp &= ~R300_MC_IND_ADDR_MASK; - temp |= R300_MC_READ_CNTL_CD_mcind; - WREG32(R300_MC_IND_INDEX, temp); - temp = RREG32(R300_MC_IND_DATA); - data = (R300_MEM_RBS_POSITION_C_MASK & temp); - } else { - temp = RREG32(R300_MC_READ_CNTL_AB); - data = (R300_MEM_RBS_POSITION_A_MASK & temp); - } - } else { - temp = RREG32(R300_MC_READ_CNTL_AB); - data = (R300_MEM_RBS_POSITION_A_MASK & temp); - } - if (rdev->family == CHIP_RV410 || - rdev->family == CHIP_R420 || - rdev->family == CHIP_R423) - trbs_ff = memtrbs_r4xx[data]; - else - trbs_ff = memtrbs[data]; - tcas_ff.full += trbs_ff.full; +static void r100_pll_errata_after_data(struct radeon_device *rdev) +{ + /* This workarounds is necessary on RV100, RS100 and RS200 chips + * or the chip could hang on a subsequent access + */ + if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) { + mdelay(5); } - sclk_eff_ff.full = sclk_ff.full; + /* This function is required to workaround a hardware bug in some (all?) + * revisions of the R300. This workaround should be called after every + * CLOCK_CNTL_INDEX register access. If not, register reads afterward + * may not be correct. + */ + if (rdev->pll_errata & CHIP_ERRATA_R300_CG) { + uint32_t save, tmp; - if (rdev->flags & RADEON_IS_AGP) { - fixed20_12 agpmode_ff; - agpmode_ff.full = dfixed_const(radeon_agpmode); - temp_ff.full = dfixed_const_666(16); - sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); + save = RREG32(RADEON_CLOCK_CNTL_INDEX); + tmp = save & ~(0x3f | RADEON_PLL_WR_EN); + WREG32(RADEON_CLOCK_CNTL_INDEX, tmp); + tmp = RREG32(RADEON_CLOCK_CNTL_DATA); + WREG32(RADEON_CLOCK_CNTL_INDEX, save); } - /* TODO PCIE lanes may affect this - agpmode == 16?? 
*/ +} - if (ASIC_IS_R300(rdev)) { - sclk_delay_ff.full = dfixed_const(250); - } else { - if ((rdev->family == CHIP_RV100) || - rdev->flags & RADEON_IS_IGP) { - if (rdev->mc.vram_is_ddr) - sclk_delay_ff.full = dfixed_const(41); - else - sclk_delay_ff.full = dfixed_const(33); - } else { - if (rdev->mc.vram_width == 128) - sclk_delay_ff.full = dfixed_const(57); - else - sclk_delay_ff.full = dfixed_const(41); - } - } +uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) +{ + uint32_t data; - mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); + WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); + r100_pll_errata_after_index(rdev); + data = RREG32(RADEON_CLOCK_CNTL_DATA); + r100_pll_errata_after_data(rdev); + return data; +} - if (rdev->mc.vram_is_ddr) { - if (rdev->mc.vram_width == 32) { - k1.full = dfixed_const(40); - c = 3; - } else { - k1.full = dfixed_const(20); - c = 1; - } +void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) +{ + WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); + r100_pll_errata_after_index(rdev); + WREG32(RADEON_CLOCK_CNTL_DATA, v); + r100_pll_errata_after_data(rdev); +} + +void r100_set_safe_registers(struct radeon_device *rdev) +{ + if (ASIC_IS_RN50(rdev)) { + rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; + rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm); + } else if (rdev->family < CHIP_R200) { + rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; + rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); } else { - k1.full = dfixed_const(40); - c = 3; + r200_set_safe_registers(rdev); } +} - temp_ff.full = dfixed_const(2); - mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); - temp_ff.full = dfixed_const(c); - mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); - temp_ff.full = dfixed_const(4); - mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); - mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); - mc_latency_mclk.full += k1.full; +/* + * Debugfs info + */ +#if defined(CONFIG_DEBUG_FS) +static int r100_debugfs_rbbm_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct radeon_device *rdev = dev->dev_private; + uint32_t reg, value; + unsigned i; - mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); - mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff); + seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS)); + seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C)); + seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); + for (i = 0; i < 64; i++) { + WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100); + reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2; + WREG32(RADEON_RBBM_CMDFIFO_ADDR, i); + value = RREG32(RADEON_RBBM_CMDFIFO_DATA); + seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value); + } + return 0; +} - /* - HW cursor time assuming worst case of full size colour cursor. 
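Written out, the memory-clock latency accumulated just above is mc_latency_mclk = (2*tRCD + c*tCAS + 4*(tRAS + tRP) + k1) / mclk + 4 / sclk_eff, with k1 and c taken from the DDR/bus-width cases directly before it. A plain floating-point restatement (the driver does the same arithmetic in 20.12 fixed point; the numbers fed in below are arbitrary):

#include <stdio.h>

/* Floating-point restatement of the mclk-side latency accumulated above. */
static double mc_latency_mclk(double trcd, double trp, double tras, double tcas,
                              double k1, double c, double mclk, double sclk_eff)
{
        return (2.0 * trcd + c * tcas + 4.0 * (tras + trp) + k1) / mclk
               + 4.0 / sclk_eff;
}

int main(void)
{
        /* DDR on a 64-bit bus picks k1 = 20, c = 1 in the code above */
        printf("latency factor: %.4f\n",
               mc_latency_mclk(8, 5, 11, 2, 20, 1, 200.0, 166.0));
        return 0;
}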
- */ - temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); - temp_ff.full += trcd_ff.full; - if (temp_ff.full < tras_ff.full) - temp_ff.full = tras_ff.full; - cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); +static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; + uint32_t rdp, wdp; + unsigned count, i, j; - temp_ff.full = dfixed_const(cur_size); - cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); - /* - Find the total latency for the display data. - */ - disp_latency_overhead.full = dfixed_const(8); - disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); - mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; - mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; + radeon_ring_free_size(rdev, ring); + rdp = RREG32(RADEON_CP_RB_RPTR); + wdp = RREG32(RADEON_CP_RB_WPTR); + count = (rdp + ring->ring_size - wdp) & ring->ptr_mask; + seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); + seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); + seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); + seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); + seq_printf(m, "%u dwords in ring\n", count); + for (j = 0; j <= count; j++) { + i = (rdp + j) & ring->ptr_mask; + seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); + } + return 0; +} - if (mc_latency_mclk.full > mc_latency_sclk.full) - disp_latency.full = mc_latency_mclk.full; - else - disp_latency.full = mc_latency_sclk.full; - /* setup Max GRPH_STOP_REQ default value */ - if (ASIC_IS_RV100(rdev)) - max_stop_req = 0x5c; - else - max_stop_req = 0x7c; +static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct radeon_device *rdev = dev->dev_private; + uint32_t csq_stat, csq2_stat, tmp; + unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr; + unsigned i; - if (mode1) { - /* CRTC1 - Set GRPH_BUFFER_CNTL register using h/w defined optimal values. 
- GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] - */ - stop_req = mode1->hdisplay * pixel_bytes1 / 16; + seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); + seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE)); + csq_stat = RREG32(RADEON_CP_CSQ_STAT); + csq2_stat = RREG32(RADEON_CP_CSQ2_STAT); + r_rptr = (csq_stat >> 0) & 0x3ff; + r_wptr = (csq_stat >> 10) & 0x3ff; + ib1_rptr = (csq_stat >> 20) & 0x3ff; + ib1_wptr = (csq2_stat >> 0) & 0x3ff; + ib2_rptr = (csq2_stat >> 10) & 0x3ff; + ib2_wptr = (csq2_stat >> 20) & 0x3ff; + seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat); + seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat); + seq_printf(m, "Ring rptr %u\n", r_rptr); + seq_printf(m, "Ring wptr %u\n", r_wptr); + seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr); + seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr); + seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr); + seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr); + /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms + * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */ + seq_printf(m, "Ring fifo:\n"); + for (i = 0; i < 256; i++) { + WREG32(RADEON_CP_CSQ_ADDR, i << 2); + tmp = RREG32(RADEON_CP_CSQ_DATA); + seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp); + } + seq_printf(m, "Indirect1 fifo:\n"); + for (i = 256; i <= 512; i++) { + WREG32(RADEON_CP_CSQ_ADDR, i << 2); + tmp = RREG32(RADEON_CP_CSQ_DATA); + seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp); + } + seq_printf(m, "Indirect2 fifo:\n"); + for (i = 640; i < ib1_wptr; i++) { + WREG32(RADEON_CP_CSQ_ADDR, i << 2); + tmp = RREG32(RADEON_CP_CSQ_DATA); + seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp); + } + return 0; +} + +static int r100_debugfs_mc_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct radeon_device *rdev = dev->dev_private; + uint32_t tmp; - if (stop_req > max_stop_req) - stop_req = max_stop_req; + tmp = RREG32(RADEON_CONFIG_MEMSIZE); + seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp); + tmp = RREG32(RADEON_MC_FB_LOCATION); + seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp); + tmp = RREG32(RADEON_BUS_CNTL); + seq_printf(m, "BUS_CNTL 0x%08x\n", tmp); + tmp = RREG32(RADEON_MC_AGP_LOCATION); + seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp); + tmp = RREG32(RADEON_AGP_BASE); + seq_printf(m, "AGP_BASE 0x%08x\n", tmp); + tmp = RREG32(RADEON_HOST_PATH_CNTL); + seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp); + tmp = RREG32(0x01D0); + seq_printf(m, "AIC_CTRL 0x%08x\n", tmp); + tmp = RREG32(RADEON_AIC_LO_ADDR); + seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp); + tmp = RREG32(RADEON_AIC_HI_ADDR); + seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp); + tmp = RREG32(0x01E4); + seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp); + return 0; +} - /* - Find the drain rate of the display buffer. - */ - temp_ff.full = dfixed_const((16/pixel_bytes1)); - disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); +static struct drm_info_list r100_debugfs_rbbm_list[] = { + {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL}, +}; - /* - Find the critical point of the display buffer. 
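For CRTC1 the two quantities computed above are simple: stop_req = hdisplay * bytes_per_pixel / 16, clamped to the per-ASIC maximum (0x5c on RV100-class parts, 0x7c otherwise), and a drain rate of pix_clk / (16 / bytes_per_pixel) octawords of display data per microsecond. Worked with hypothetical numbers (a 1280-wide, 32bpp mode at a 108 MHz pixel clock):

#include <stdio.h>

int main(void)
{
        unsigned hdisplay = 1280, bytes_pp = 4;   /* hypothetical 1280-wide, 32bpp mode */
        double pix_clk_mhz = 108.0;               /* hypothetical pixel clock */
        unsigned max_stop_req = 0x7c;             /* 0x5c on RV100-class parts */

        unsigned stop_req = hdisplay * bytes_pp / 16;   /* 320, before clamping */
        if (stop_req > max_stop_req)
                stop_req = max_stop_req;                /* clamped to 0x7c */

        /* drain rate of the display buffer, in 16-byte units per microsecond */
        double drain_rate = pix_clk_mhz / (16.0 / bytes_pp);

        printf("stop_req=0x%x drain=%.1f units/us\n", stop_req, drain_rate);
        return 0;
}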
- */ - crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); - crit_point_ff.full += dfixed_const_half(0); +static struct drm_info_list r100_debugfs_cp_list[] = { + {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL}, + {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL}, +}; - critical_point = dfixed_trunc(crit_point_ff); +static struct drm_info_list r100_debugfs_mc_info_list[] = { + {"r100_mc_info", r100_debugfs_mc_info, 0, NULL}, +}; +#endif - if (rdev->disp_priority == 2) { - critical_point = 0; - } +int r100_debugfs_rbbm_init(struct radeon_device *rdev) +{ +#if defined(CONFIG_DEBUG_FS) + return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1); +#else + return 0; +#endif +} - /* - The critical point should never be above max_stop_req-4. Setting - GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. - */ - if (max_stop_req - critical_point < 4) - critical_point = 0; +int r100_debugfs_cp_init(struct radeon_device *rdev) +{ +#if defined(CONFIG_DEBUG_FS) + return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2); +#else + return 0; +#endif +} - if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { - /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ - critical_point = 0x10; - } +int r100_debugfs_mc_info_init(struct radeon_device *rdev) +{ +#if defined(CONFIG_DEBUG_FS) + return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1); +#else + return 0; +#endif +} - temp = RREG32(RADEON_GRPH_BUFFER_CNTL); - temp &= ~(RADEON_GRPH_STOP_REQ_MASK); - temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); - temp &= ~(RADEON_GRPH_START_REQ_MASK); - if ((rdev->family == CHIP_R350) && - (stop_req > 0x15)) { - stop_req -= 0x10; - } - temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); - temp |= RADEON_GRPH_BUFFER_SIZE; - temp &= ~(RADEON_GRPH_CRITICAL_CNTL | - RADEON_GRPH_CRITICAL_AT_SOF | - RADEON_GRPH_STOP_CNTL); - /* - Write the result into the register. - */ - WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | - (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); +int r100_set_surface_reg(struct radeon_device *rdev, int reg, + uint32_t tiling_flags, uint32_t pitch, + uint32_t offset, uint32_t obj_size) +{ + int surf_index = reg * 16; + int flags = 0; -#if 0 - if ((rdev->family == CHIP_RS400) || - (rdev->family == CHIP_RS480)) { - /* attempt to program RS400 disp regs correctly ??? 
*/ - temp = RREG32(RS400_DISP1_REG_CNTL); - temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | - RS400_DISP1_STOP_REQ_LEVEL_MASK); - WREG32(RS400_DISP1_REQ_CNTL1, (temp | - (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | - (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); - temp = RREG32(RS400_DMIF_MEM_CNTL1); - temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | - RS400_DISP1_CRITICAL_POINT_STOP_MASK); - WREG32(RS400_DMIF_MEM_CNTL1, (temp | - (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | - (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); - } -#endif + if (rdev->family <= CHIP_RS200) { + if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) + == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) + flags |= RADEON_SURF_TILE_COLOR_BOTH; + if (tiling_flags & RADEON_TILING_MACRO) + flags |= RADEON_SURF_TILE_COLOR_MACRO; + } else if (rdev->family <= CHIP_RV280) { + if (tiling_flags & (RADEON_TILING_MACRO)) + flags |= R200_SURF_TILE_COLOR_MACRO; + if (tiling_flags & RADEON_TILING_MICRO) + flags |= R200_SURF_TILE_COLOR_MICRO; + } else { + if (tiling_flags & RADEON_TILING_MACRO) + flags |= R300_SURF_TILE_MACRO; + if (tiling_flags & RADEON_TILING_MICRO) + flags |= R300_SURF_TILE_MICRO; + } - DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n", - /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ - (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); + if (tiling_flags & RADEON_TILING_SWAP_16BIT) + flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP; + if (tiling_flags & RADEON_TILING_SWAP_32BIT) + flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; + + /* when we aren't tiling the pitch seems to needs to be furtherdivided down. - tested on power5 + rn50 server */ + if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) { + if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO))) + if (ASIC_IS_RN50(rdev)) + pitch /= 16; } - if (mode2) { - u32 grph2_cntl; - stop_req = mode2->hdisplay * pixel_bytes2 / 16; + /* r100/r200 divide by 16 */ + if (rdev->family < CHIP_R300) + flags |= pitch / 16; + else + flags |= pitch / 8; - if (stop_req > max_stop_req) - stop_req = max_stop_req; - /* - Find the drain rate of the display buffer. 
- */ - temp_ff.full = dfixed_const((16/pixel_bytes2)); - disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); + DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); + WREG32(RADEON_SURFACE0_INFO + surf_index, flags); + WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); + WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); + return 0; +} - grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); - grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); - grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); - grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); - if ((rdev->family == CHIP_R350) && - (stop_req > 0x15)) { - stop_req -= 0x10; - } - grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); - grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; - grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | - RADEON_GRPH_CRITICAL_AT_SOF | - RADEON_GRPH_STOP_CNTL); +void r100_clear_surface_reg(struct radeon_device *rdev, int reg) +{ + int surf_index = reg * 16; + WREG32(RADEON_SURFACE0_INFO + surf_index, 0); +} - if ((rdev->family == CHIP_RS100) || - (rdev->family == CHIP_RS200)) - critical_point2 = 0; - else { - temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; - temp_ff.full = dfixed_const(temp); - temp_ff.full = dfixed_mul(mclk_ff, temp_ff); - if (sclk_ff.full < temp_ff.full) - temp_ff.full = sclk_ff.full; +void r100_bandwidth_update(struct radeon_device *rdev) +{ + fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; + fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; + fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; + uint32_t temp, data, mem_trcd, mem_trp, mem_tras; + fixed20_12 memtcas_ff[8] = { + dfixed_init(1), + dfixed_init(2), + dfixed_init(3), + dfixed_init(0), + dfixed_init_half(1), + dfixed_init_half(2), + dfixed_init(0), + }; + fixed20_12 memtcas_rs480_ff[8] = { + dfixed_init(0), + dfixed_init(1), + dfixed_init(2), + dfixed_init(3), + dfixed_init(0), + dfixed_init_half(1), + dfixed_init_half(2), + dfixed_init_half(3), + }; + fixed20_12 memtcas2_ff[8] = { + dfixed_init(0), + dfixed_init(1), + dfixed_init(2), + dfixed_init(3), + dfixed_init(4), + dfixed_init(5), + dfixed_init(6), + dfixed_init(7), + }; + fixed20_12 memtrbs[8] = { + dfixed_init(1), + dfixed_init_half(1), + dfixed_init(2), + dfixed_init_half(2), + dfixed_init(3), + dfixed_init_half(3), + dfixed_init(4), + dfixed_init_half(4) + }; + fixed20_12 memtrbs_r4xx[8] = { + dfixed_init(4), + dfixed_init(5), + dfixed_init(6), + dfixed_init(7), + dfixed_init(8), + dfixed_init(9), + dfixed_init(10), + dfixed_init(11) + }; + fixed20_12 min_mem_eff; + fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; + fixed20_12 cur_latency_mclk, cur_latency_sclk; + fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, + disp_drain_rate2, read_return_rate; + fixed20_12 time_disp1_drop_priority; + int c; + int cur_size = 16; /* in octawords */ + int critical_point = 0, critical_point2; +/* uint32_t read_return_rate, time_disp1_drop_priority; */ + int stop_req, max_stop_req; + struct drm_display_mode *mode1 = NULL; + struct drm_display_mode *mode2 = NULL; + uint32_t pixel_bytes1 = 0; + uint32_t pixel_bytes2 = 0; - read_return_rate.full = temp_ff.full; + radeon_update_display_priority(rdev); - if (mode1) { - temp_ff.full = read_return_rate.full - disp_drain_rate.full; - time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); - } else { - time_disp1_drop_priority.full = 0; - } - crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + 
disp_latency.full; - crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); - crit_point_ff.full += dfixed_const_half(0); + if (rdev->mode_info.crtcs[0]->base.enabled) { + mode1 = &rdev->mode_info.crtcs[0]->base.mode; + pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; + } + if (!(rdev->flags & RADEON_SINGLE_CRTC)) { + if (rdev->mode_info.crtcs[1]->base.enabled) { + mode2 = &rdev->mode_info.crtcs[1]->base.mode; + pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; + } + } - critical_point2 = dfixed_trunc(crit_point_ff); + min_mem_eff.full = dfixed_const_8(0); + /* get modes */ + if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { + uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); + mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); + mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); + /* check crtc enables */ + if (mode2) + mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); + if (mode1) + mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); + WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); + } - if (rdev->disp_priority == 2) { - critical_point2 = 0; - } + /* + * determine is there is enough bw for current mode + */ + sclk_ff = rdev->pm.sclk; + mclk_ff = rdev->pm.mclk; - if (max_stop_req - critical_point2 < 4) - critical_point2 = 0; + temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); + temp_ff.full = dfixed_const(temp); + mem_bw.full = dfixed_mul(mclk_ff, temp_ff); - } + pix_clk.full = 0; + pix_clk2.full = 0; + peak_disp_bw.full = 0; + if (mode1) { + temp_ff.full = dfixed_const(1000); + pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */ + pix_clk.full = dfixed_div(pix_clk, temp_ff); + temp_ff.full = dfixed_const(pixel_bytes1); + peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff); + } + if (mode2) { + temp_ff.full = dfixed_const(1000); + pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */ + pix_clk2.full = dfixed_div(pix_clk2, temp_ff); + temp_ff.full = dfixed_const(pixel_bytes2); + peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff); + } - if (critical_point2 == 0 && rdev->family == CHIP_R300) { - /* some R300 cards have problem with this set to 0 */ - critical_point2 = 0x10; - } + mem_bw.full = dfixed_mul(mem_bw, min_mem_eff); + if (peak_disp_bw.full >= mem_bw.full) { + DRM_ERROR("You may not have enough display bandwidth for current mode\n" + "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); + } - WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | - (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); + /* Get values from the EXT_MEM_CNTL register...converting its contents. 
*/ + temp = RREG32(RADEON_MEM_TIMING_CNTL); + if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ + mem_trcd = ((temp >> 2) & 0x3) + 1; + mem_trp = ((temp & 0x3)) + 1; + mem_tras = ((temp & 0x70) >> 4) + 1; + } else if (rdev->family == CHIP_R300 || + rdev->family == CHIP_R350) { /* r300, r350 */ + mem_trcd = (temp & 0x7) + 1; + mem_trp = ((temp >> 8) & 0x7) + 1; + mem_tras = ((temp >> 11) & 0xf) + 4; + } else if (rdev->family == CHIP_RV350 || + rdev->family <= CHIP_RV380) { + /* rv3x0 */ + mem_trcd = (temp & 0x7) + 3; + mem_trp = ((temp >> 8) & 0x7) + 3; + mem_tras = ((temp >> 11) & 0xf) + 6; + } else if (rdev->family == CHIP_R420 || + rdev->family == CHIP_R423 || + rdev->family == CHIP_RV410) { + /* r4xx */ + mem_trcd = (temp & 0xf) + 3; + if (mem_trcd > 15) + mem_trcd = 15; + mem_trp = ((temp >> 8) & 0xf) + 3; + if (mem_trp > 15) + mem_trp = 15; + mem_tras = ((temp >> 12) & 0x1f) + 6; + if (mem_tras > 31) + mem_tras = 31; + } else { /* RV200, R200 */ + mem_trcd = (temp & 0x7) + 1; + mem_trp = ((temp >> 8) & 0x7) + 1; + mem_tras = ((temp >> 12) & 0xf) + 4; + } + /* convert to FF */ + trcd_ff.full = dfixed_const(mem_trcd); + trp_ff.full = dfixed_const(mem_trp); + tras_ff.full = dfixed_const(mem_tras); - if ((rdev->family == CHIP_RS400) || - (rdev->family == CHIP_RS480)) { -#if 0 - /* attempt to program RS400 disp2 regs correctly ??? */ - temp = RREG32(RS400_DISP2_REQ_CNTL1); - temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | - RS400_DISP2_STOP_REQ_LEVEL_MASK); - WREG32(RS400_DISP2_REQ_CNTL1, (temp | - (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | - (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); - temp = RREG32(RS400_DISP2_REQ_CNTL2); - temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | - RS400_DISP2_CRITICAL_POINT_STOP_MASK); - WREG32(RS400_DISP2_REQ_CNTL2, (temp | - (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | - (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); -#endif - WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); - WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); - WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); - WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); - } + /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ + temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); + data = (temp & (7 << 20)) >> 20; + if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { + if (rdev->family == CHIP_RS480) /* don't think rs400 */ + tcas_ff = memtcas_rs480_ff[data]; + else + tcas_ff = memtcas_ff[data]; + } else + tcas_ff = memtcas2_ff[data]; - DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", - (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); + if (rdev->family == CHIP_RS400 || + rdev->family == CHIP_RS480) { + /* extra cas latency stored in bits 23-25 0-4 clocks */ + data = (temp >> 23) & 0x7; + if (data < 5) + tcas_ff.full += dfixed_const(data); } -} -static void r100_cs_track_texture_print(struct r100_cs_track_texture *t) -{ - DRM_ERROR("pitch %d\n", t->pitch); - DRM_ERROR("use_pitch %d\n", t->use_pitch); - DRM_ERROR("width %d\n", t->width); - DRM_ERROR("width_11 %d\n", t->width_11); - DRM_ERROR("height %d\n", t->height); - DRM_ERROR("height_11 %d\n", t->height_11); - DRM_ERROR("num levels %d\n", t->num_levels); - DRM_ERROR("depth %d\n", t->txdepth); - DRM_ERROR("bpp %d\n", t->cpp); - DRM_ERROR("coordinate type %d\n", t->tex_coord_type); - DRM_ERROR("width round to power of 2 %d\n", t->roundup_w); - DRM_ERROR("height round to power of 2 %d\n", t->roundup_h); - DRM_ERROR("compress format %d\n", t->compress_format); 
-} + if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { + /* on the R300, Tcas is included in Trbs. + */ + temp = RREG32(RADEON_MEM_CNTL); + data = (R300_MEM_NUM_CHANNELS_MASK & temp); + if (data == 1) { + if (R300_MEM_USE_CD_CH_ONLY & temp) { + temp = RREG32(R300_MC_IND_INDEX); + temp &= ~R300_MC_IND_ADDR_MASK; + temp |= R300_MC_READ_CNTL_CD_mcind; + WREG32(R300_MC_IND_INDEX, temp); + temp = RREG32(R300_MC_IND_DATA); + data = (R300_MEM_RBS_POSITION_C_MASK & temp); + } else { + temp = RREG32(R300_MC_READ_CNTL_AB); + data = (R300_MEM_RBS_POSITION_A_MASK & temp); + } + } else { + temp = RREG32(R300_MC_READ_CNTL_AB); + data = (R300_MEM_RBS_POSITION_A_MASK & temp); + } + if (rdev->family == CHIP_RV410 || + rdev->family == CHIP_R420 || + rdev->family == CHIP_R423) + trbs_ff = memtrbs_r4xx[data]; + else + trbs_ff = memtrbs[data]; + tcas_ff.full += trbs_ff.full; + } -static int r100_track_compress_size(int compress_format, int w, int h) -{ - int block_width, block_height, block_bytes; - int wblocks, hblocks; - int min_wblocks; - int sz; + sclk_eff_ff.full = sclk_ff.full; - block_width = 4; - block_height = 4; + if (rdev->flags & RADEON_IS_AGP) { + fixed20_12 agpmode_ff; + agpmode_ff.full = dfixed_const(radeon_agpmode); + temp_ff.full = dfixed_const_666(16); + sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff); + } + /* TODO PCIE lanes may affect this - agpmode == 16?? */ - switch (compress_format) { - case R100_TRACK_COMP_DXT1: - block_bytes = 8; - min_wblocks = 4; - break; - default: - case R100_TRACK_COMP_DXT35: - block_bytes = 16; - min_wblocks = 2; - break; + if (ASIC_IS_R300(rdev)) { + sclk_delay_ff.full = dfixed_const(250); + } else { + if ((rdev->family == CHIP_RV100) || + rdev->flags & RADEON_IS_IGP) { + if (rdev->mc.vram_is_ddr) + sclk_delay_ff.full = dfixed_const(41); + else + sclk_delay_ff.full = dfixed_const(33); + } else { + if (rdev->mc.vram_width == 128) + sclk_delay_ff.full = dfixed_const(57); + else + sclk_delay_ff.full = dfixed_const(41); + } } - hblocks = (h + block_height - 1) / block_height; - wblocks = (w + block_width - 1) / block_width; - if (wblocks < min_wblocks) - wblocks = min_wblocks; - sz = wblocks * hblocks * block_bytes; - return sz; -} + mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff); -static int r100_cs_track_cube(struct radeon_device *rdev, - struct r100_cs_track *track, unsigned idx) -{ - unsigned face, w, h; - struct radeon_bo *cube_robj; - unsigned long size; - unsigned compress_format = track->textures[idx].compress_format; + if (rdev->mc.vram_is_ddr) { + if (rdev->mc.vram_width == 32) { + k1.full = dfixed_const(40); + c = 3; + } else { + k1.full = dfixed_const(20); + c = 1; + } + } else { + k1.full = dfixed_const(40); + c = 3; + } - for (face = 0; face < 5; face++) { - cube_robj = track->textures[idx].cube_info[face].robj; - w = track->textures[idx].cube_info[face].width; - h = track->textures[idx].cube_info[face].height; + temp_ff.full = dfixed_const(2); + mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff); + temp_ff.full = dfixed_const(c); + mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff); + temp_ff.full = dfixed_const(4); + mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff); + mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff); + mc_latency_mclk.full += k1.full; - if (compress_format) { - size = r100_track_compress_size(compress_format, w, h); - } else - size = w * h; - size *= track->textures[idx].cpp; + mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff); + mc_latency_mclk.full += dfixed_div(temp_ff, 
sclk_eff_ff); - size += track->textures[idx].cube_info[face].offset; + /* + HW cursor time assuming worst case of full size colour cursor. + */ + temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); + temp_ff.full += trcd_ff.full; + if (temp_ff.full < tras_ff.full) + temp_ff.full = tras_ff.full; + cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff); - if (size > radeon_bo_size(cube_robj)) { - DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", - size, radeon_bo_size(cube_robj)); - r100_cs_track_texture_print(&track->textures[idx]); - return -1; - } - } - return 0; -} + temp_ff.full = dfixed_const(cur_size); + cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff); + /* + Find the total latency for the display data. + */ + disp_latency_overhead.full = dfixed_const(8); + disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff); + mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; + mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; -static int r100_cs_track_texture_check(struct radeon_device *rdev, - struct r100_cs_track *track) -{ - struct radeon_bo *robj; - unsigned long size; - unsigned u, i, w, h, d; - int ret; + if (mc_latency_mclk.full > mc_latency_sclk.full) + disp_latency.full = mc_latency_mclk.full; + else + disp_latency.full = mc_latency_sclk.full; - for (u = 0; u < track->num_texture; u++) { - if (!track->textures[u].enabled) - continue; - if (track->textures[u].lookup_disable) - continue; - robj = track->textures[u].robj; - if (robj == NULL) { - DRM_ERROR("No texture bound to unit %u\n", u); - return -EINVAL; - } - size = 0; - for (i = 0; i <= track->textures[u].num_levels; i++) { - if (track->textures[u].use_pitch) { - if (rdev->family < CHIP_R300) - w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i); - else - w = track->textures[u].pitch / (1 << i); - } else { - w = track->textures[u].width; - if (rdev->family >= CHIP_RV515) - w |= track->textures[u].width_11; - w = w / (1 << i); - if (track->textures[u].roundup_w) - w = roundup_pow_of_two(w); - } - h = track->textures[u].height; - if (rdev->family >= CHIP_RV515) - h |= track->textures[u].height_11; - h = h / (1 << i); - if (track->textures[u].roundup_h) - h = roundup_pow_of_two(h); - if (track->textures[u].tex_coord_type == 1) { - d = (1 << track->textures[u].txdepth) / (1 << i); - if (!d) - d = 1; - } else { - d = 1; - } - if (track->textures[u].compress_format) { + /* setup Max GRPH_STOP_REQ default value */ + if (ASIC_IS_RV100(rdev)) + max_stop_req = 0x5c; + else + max_stop_req = 0x7c; - size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d; - /* compressed textures are block based */ - } else - size += w * h * d; - } - size *= track->textures[u].cpp; + if (mode1) { + /* CRTC1 + Set GRPH_BUFFER_CNTL register using h/w defined optimal values. 
+ GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] + */ + stop_req = mode1->hdisplay * pixel_bytes1 / 16; - switch (track->textures[u].tex_coord_type) { - case 0: - case 1: - break; - case 2: - if (track->separate_cube) { - ret = r100_cs_track_cube(rdev, track, u); - if (ret) - return ret; - } else - size *= 6; - break; - default: - DRM_ERROR("Invalid texture coordinate type %u for unit " - "%u\n", track->textures[u].tex_coord_type, u); - return -EINVAL; - } - if (size > radeon_bo_size(robj)) { - DRM_ERROR("Texture of unit %u needs %lu bytes but is " - "%lu\n", u, size, radeon_bo_size(robj)); - r100_cs_track_texture_print(&track->textures[u]); - return -EINVAL; - } - } - return 0; -} + if (stop_req > max_stop_req) + stop_req = max_stop_req; -int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) -{ - unsigned i; - unsigned long size; - unsigned prim_walk; - unsigned nverts; - unsigned num_cb = track->cb_dirty ? track->num_cb : 0; + /* + Find the drain rate of the display buffer. + */ + temp_ff.full = dfixed_const((16/pixel_bytes1)); + disp_drain_rate.full = dfixed_div(pix_clk, temp_ff); - if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && - !track->blend_read_enable) - num_cb = 0; + /* + Find the critical point of the display buffer. + */ + crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency); + crit_point_ff.full += dfixed_const_half(0); - for (i = 0; i < num_cb; i++) { - if (track->cb[i].robj == NULL) { - DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); - return -EINVAL; - } - size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; - size += track->cb[i].offset; - if (size > radeon_bo_size(track->cb[i].robj)) { - DRM_ERROR("[drm] Buffer too small for color buffer %d " - "(need %lu have %lu) !\n", i, size, - radeon_bo_size(track->cb[i].robj)); - DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", - i, track->cb[i].pitch, track->cb[i].cpp, - track->cb[i].offset, track->maxy); - return -EINVAL; + critical_point = dfixed_trunc(crit_point_ff); + + if (rdev->disp_priority == 2) { + critical_point = 0; } - } - track->cb_dirty = false; - if (track->zb_dirty && track->z_enabled) { - if (track->zb.robj == NULL) { - DRM_ERROR("[drm] No buffer for z buffer !\n"); - return -EINVAL; + /* + The critical point should never be above max_stop_req-4. Setting + GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. 
+ */ + if (max_stop_req - critical_point < 4) + critical_point = 0; + + if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { + /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ + critical_point = 0x10; } - size = track->zb.pitch * track->zb.cpp * track->maxy; - size += track->zb.offset; - if (size > radeon_bo_size(track->zb.robj)) { - DRM_ERROR("[drm] Buffer too small for z buffer " - "(need %lu have %lu) !\n", size, - radeon_bo_size(track->zb.robj)); - DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", - track->zb.pitch, track->zb.cpp, - track->zb.offset, track->maxy); - return -EINVAL; + + temp = RREG32(RADEON_GRPH_BUFFER_CNTL); + temp &= ~(RADEON_GRPH_STOP_REQ_MASK); + temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); + temp &= ~(RADEON_GRPH_START_REQ_MASK); + if ((rdev->family == CHIP_R350) && + (stop_req > 0x15)) { + stop_req -= 0x10; } - } - track->zb_dirty = false; + temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); + temp |= RADEON_GRPH_BUFFER_SIZE; + temp &= ~(RADEON_GRPH_CRITICAL_CNTL | + RADEON_GRPH_CRITICAL_AT_SOF | + RADEON_GRPH_STOP_CNTL); + /* + Write the result into the register. + */ + WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | + (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); - if (track->aa_dirty && track->aaresolve) { - if (track->aa.robj == NULL) { - DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); - return -EINVAL; - } - /* I believe the format comes from colorbuffer0. */ - size = track->aa.pitch * track->cb[0].cpp * track->maxy; - size += track->aa.offset; - if (size > radeon_bo_size(track->aa.robj)) { - DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " - "(need %lu have %lu) !\n", i, size, - radeon_bo_size(track->aa.robj)); - DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", - i, track->aa.pitch, track->cb[0].cpp, - track->aa.offset, track->maxy); - return -EINVAL; +#if 0 + if ((rdev->family == CHIP_RS400) || + (rdev->family == CHIP_RS480)) { + /* attempt to program RS400 disp regs correctly ??? 
*/ + temp = RREG32(RS400_DISP1_REG_CNTL); + temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | + RS400_DISP1_STOP_REQ_LEVEL_MASK); + WREG32(RS400_DISP1_REQ_CNTL1, (temp | + (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | + (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); + temp = RREG32(RS400_DMIF_MEM_CNTL1); + temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | + RS400_DISP1_CRITICAL_POINT_STOP_MASK); + WREG32(RS400_DMIF_MEM_CNTL1, (temp | + (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | + (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); } - } - track->aa_dirty = false; +#endif - prim_walk = (track->vap_vf_cntl >> 4) & 0x3; - if (track->vap_vf_cntl & (1 << 14)) { - nverts = track->vap_alt_nverts; - } else { - nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; + DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n", + /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ + (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); } - switch (prim_walk) { - case 1: - for (i = 0; i < track->num_arrays; i++) { - size = track->arrays[i].esize * track->max_indx * 4; - if (track->arrays[i].robj == NULL) { - DRM_ERROR("(PW %u) Vertex array %u no buffer " - "bound\n", prim_walk, i); - return -EINVAL; - } - if (size > radeon_bo_size(track->arrays[i].robj)) { - dev_err(rdev->dev, "(PW %u) Vertex array %u " - "need %lu dwords have %lu dwords\n", - prim_walk, i, size >> 2, - radeon_bo_size(track->arrays[i].robj) - >> 2); - DRM_ERROR("Max indices %u\n", track->max_indx); - return -EINVAL; - } + + if (mode2) { + u32 grph2_cntl; + stop_req = mode2->hdisplay * pixel_bytes2 / 16; + + if (stop_req > max_stop_req) + stop_req = max_stop_req; + + /* + Find the drain rate of the display buffer. + */ + temp_ff.full = dfixed_const((16/pixel_bytes2)); + disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff); + + grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); + grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); + grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); + grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); + if ((rdev->family == CHIP_R350) && + (stop_req > 0x15)) { + stop_req -= 0x10; } - break; - case 2: - for (i = 0; i < track->num_arrays; i++) { - size = track->arrays[i].esize * (nverts - 1) * 4; - if (track->arrays[i].robj == NULL) { - DRM_ERROR("(PW %u) Vertex array %u no buffer " - "bound\n", prim_walk, i); - return -EINVAL; + grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); + grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; + grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | + RADEON_GRPH_CRITICAL_AT_SOF | + RADEON_GRPH_STOP_CNTL); + + if ((rdev->family == CHIP_RS100) || + (rdev->family == CHIP_RS200)) + critical_point2 = 0; + else { + temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; + temp_ff.full = dfixed_const(temp); + temp_ff.full = dfixed_mul(mclk_ff, temp_ff); + if (sclk_ff.full < temp_ff.full) + temp_ff.full = sclk_ff.full; + + read_return_rate.full = temp_ff.full; + + if (mode1) { + temp_ff.full = read_return_rate.full - disp_drain_rate.full; + time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff); + } else { + time_disp1_drop_priority.full = 0; } - if (size > radeon_bo_size(track->arrays[i].robj)) { - dev_err(rdev->dev, "(PW %u) Vertex array %u " - "need %lu dwords have %lu dwords\n", - prim_walk, i, size >> 2, - radeon_bo_size(track->arrays[i].robj) - >> 2); - return -EINVAL; + crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; + crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2); + crit_point_ff.full += 
dfixed_const_half(0); + + critical_point2 = dfixed_trunc(crit_point_ff); + + if (rdev->disp_priority == 2) { + critical_point2 = 0; } - } - break; - case 3: - size = track->vtx_size * nverts; - if (size != track->immd_dwords) { - DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n", - track->immd_dwords, size); - DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", - nverts, track->vtx_size); - return -EINVAL; - } - break; - default: - DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", - prim_walk); - return -EINVAL; - } - if (track->tex_dirty) { - track->tex_dirty = false; - return r100_cs_track_texture_check(rdev, track); - } - return 0; -} + if (max_stop_req - critical_point2 < 4) + critical_point2 = 0; -void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) -{ - unsigned i, face; + } - track->cb_dirty = true; - track->zb_dirty = true; - track->tex_dirty = true; - track->aa_dirty = true; + if (critical_point2 == 0 && rdev->family == CHIP_R300) { + /* some R300 cards have problem with this set to 0 */ + critical_point2 = 0x10; + } - if (rdev->family < CHIP_R300) { - track->num_cb = 1; - if (rdev->family <= CHIP_RS200) - track->num_texture = 3; - else - track->num_texture = 6; - track->maxy = 2048; - track->separate_cube = 1; - } else { - track->num_cb = 4; - track->num_texture = 16; - track->maxy = 4096; - track->separate_cube = 0; - track->aaresolve = false; - track->aa.robj = NULL; - } + WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | + (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); - for (i = 0; i < track->num_cb; i++) { - track->cb[i].robj = NULL; - track->cb[i].pitch = 8192; - track->cb[i].cpp = 16; - track->cb[i].offset = 0; - } - track->z_enabled = true; - track->zb.robj = NULL; - track->zb.pitch = 8192; - track->zb.cpp = 4; - track->zb.offset = 0; - track->vtx_size = 0x7F; - track->immd_dwords = 0xFFFFFFFFUL; - track->num_arrays = 11; - track->max_indx = 0x00FFFFFFUL; - for (i = 0; i < track->num_arrays; i++) { - track->arrays[i].robj = NULL; - track->arrays[i].esize = 0x7F; - } - for (i = 0; i < track->num_texture; i++) { - track->textures[i].compress_format = R100_TRACK_COMP_NONE; - track->textures[i].pitch = 16536; - track->textures[i].width = 16536; - track->textures[i].height = 16536; - track->textures[i].width_11 = 1 << 11; - track->textures[i].height_11 = 1 << 11; - track->textures[i].num_levels = 12; - if (rdev->family <= CHIP_RS200) { - track->textures[i].tex_coord_type = 0; - track->textures[i].txdepth = 0; - } else { - track->textures[i].txdepth = 16; - track->textures[i].tex_coord_type = 1; + if ((rdev->family == CHIP_RS400) || + (rdev->family == CHIP_RS480)) { +#if 0 + /* attempt to program RS400 disp2 regs correctly ??? 
*/ + temp = RREG32(RS400_DISP2_REQ_CNTL1); + temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | + RS400_DISP2_STOP_REQ_LEVEL_MASK); + WREG32(RS400_DISP2_REQ_CNTL1, (temp | + (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | + (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); + temp = RREG32(RS400_DISP2_REQ_CNTL2); + temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | + RS400_DISP2_CRITICAL_POINT_STOP_MASK); + WREG32(RS400_DISP2_REQ_CNTL2, (temp | + (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | + (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); +#endif + WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); + WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); + WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); + WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); } - track->textures[i].cpp = 64; - track->textures[i].robj = NULL; - /* CS IB emission code makes sure texture unit are disabled */ - track->textures[i].enabled = false; - track->textures[i].lookup_disable = false; - track->textures[i].roundup_w = true; - track->textures[i].roundup_h = true; - if (track->separate_cube) - for (face = 0; face < 5; face++) { - track->textures[i].cube_info[face].robj = NULL; - track->textures[i].cube_info[face].width = 16536; - track->textures[i].cube_info[face].height = 16536; - track->textures[i].cube_info[face].offset = 0; - } + + DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", + (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); } } -- cgit v1.2.3 From 74da01dcfbb6300d758490d5d4efa1314c0e4e8b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 28 Jun 2012 17:50:35 -0400 Subject: drm/radeon: move r100_enable_bm to a more logic place It was stuck right in the middle of the gart functions. Move next to the bm_disable function and where it is used. Signed-off-by: Alex Deucher Reviewed-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r100.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 3fa82e1b9428..d06c8dd8ddfa 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -513,15 +513,6 @@ int r100_pci_gart_init(struct radeon_device *rdev) return radeon_gart_table_ram_alloc(rdev); } -/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ -void r100_enable_bm(struct radeon_device *rdev) -{ - uint32_t tmp; - /* Enable bus mastering */ - tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; - WREG32(RADEON_BUS_CNTL, tmp); -} - int r100_pci_gart_enable(struct radeon_device *rdev) { uint32_t tmp; @@ -2531,6 +2522,15 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) return radeon_ring_test_lockup(rdev, ring); } +/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ +void r100_enable_bm(struct radeon_device *rdev) +{ + uint32_t tmp; + /* Enable bus mastering */ + tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; + WREG32(RADEON_BUS_CNTL, tmp); +} + void r100_bm_disable(struct radeon_device *rdev) { u32 tmp; -- cgit v1.2.3 From 146937e5828ede495e11ba3a6f4a01b36b7166dc Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 29 Jun 2012 10:30:39 -0700 Subject: drm/i915: linuxify create_hw_context() Daniel complained about this on initial review, but he graciously moved the patches forward. As promised, I am delivering the desired cleanup now. 
Hopefully I didn't screw the trivial patch up ;-) Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_context.c | 50 ++++++++++++++++----------------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index e58e8366f473..9ae3f2cf414e 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -136,37 +136,36 @@ static void do_destroy(struct i915_hw_context *ctx) kfree(ctx); } -static int +static struct i915_hw_context * create_hw_context(struct drm_device *dev, - struct drm_i915_file_private *file_priv, - struct i915_hw_context **ctx_out) + struct drm_i915_file_private *file_priv) { struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_hw_context *ctx; int ret, id; - *ctx_out = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL); - if (*ctx_out == NULL) - return -ENOMEM; + ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL); + if (ctx == NULL) + return ERR_PTR(-ENOMEM); - (*ctx_out)->obj = i915_gem_alloc_object(dev, - dev_priv->hw_context_size); - if ((*ctx_out)->obj == NULL) { - kfree(*ctx_out); + ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size); + if (ctx->obj == NULL) { + kfree(ctx); DRM_DEBUG_DRIVER("Context object allocated failed\n"); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } /* The ring associated with the context object is handled by the normal * object tracking code. We give an initial ring value simple to pass an * assertion in the context switch code. */ - (*ctx_out)->ring = &dev_priv->ring[RCS]; + ctx->ring = &dev_priv->ring[RCS]; /* Default context will never have a file_priv */ if (file_priv == NULL) - return 0; + return ctx; - (*ctx_out)->file_priv = file_priv; + ctx->file_priv = file_priv; again: if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) { @@ -175,21 +174,21 @@ again: goto err_out; } - ret = idr_get_new_above(&file_priv->context_idr, *ctx_out, + ret = idr_get_new_above(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, &id); if (ret == 0) - (*ctx_out)->id = id; + ctx->id = id; if (ret == -EAGAIN) goto again; else if (ret) goto err_out; - return 0; + return ctx; err_out: - do_destroy(*ctx_out); - return ret; + do_destroy(ctx); + return ERR_PTR(ret); } static inline bool is_default_context(struct i915_hw_context *ctx) @@ -209,10 +208,9 @@ static int create_default_context(struct drm_i915_private *dev_priv) BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); - ret = create_hw_context(dev_priv->dev, NULL, - &dev_priv->ring[RCS].default_context); - if (ret) - return ret; + ctx = create_hw_context(dev_priv->dev, NULL); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); /* We may need to do things with the shrinker which require us to * immediately switch back to the default context. This can cause a @@ -220,7 +218,7 @@ static int create_default_context(struct drm_i915_private *dev_priv) * may not be available. To avoid this we always pin the * default context. 
*/ - ctx = dev_priv->ring[RCS].default_context; + dev_priv->ring[RCS].default_context = ctx; ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); if (ret) { do_destroy(ctx); @@ -496,13 +494,13 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - ret = create_hw_context(dev, file_priv, &ctx); + ctx = create_hw_context(dev, file_priv); mutex_unlock(&dev->struct_mutex); args->ctx_id = ctx->id; DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); - return ret; + return PTR_RET(ctx); } int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, -- cgit v1.2.3 From 990bbdadabaa51828e475eda86ee5720a4910cc3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 2 Jul 2012 11:51:02 -0300 Subject: drm/i915: Group the GT routines together in both code and vtable Tidy up the routines for interacting with the GT (in particular the forcewake dance) which are scattered throughout the code in a single structure. v2: use wait_for_atomic for polling. v3: *really* use wait_for_atomic for polling. Signed-off-by: Chris Wilson Signed-off-by: Eugeni Dodonov Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 101 ++++++++++++++++++++++------------- drivers/gpu/drm/i915/i915_drv.h | 17 +++--- drivers/gpu/drm/i915/intel_display.c | 3 -- drivers/gpu/drm/i915/intel_pm.c | 30 ----------- 5 files changed, 73 insertions(+), 80 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 216651917fd5..4dc76976c0d0 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1548,6 +1548,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) } intel_irq_init(dev); + intel_gt_init(dev); /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev); @@ -1580,7 +1581,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (!IS_I945G(dev) && !IS_I945GM(dev)) pci_enable_msi(dev->pdev); - spin_lock_init(&dev_priv->gt_lock); spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->error_lock); spin_lock_init(&dev_priv->rps_lock); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 79be8799ea6c..928b6677759d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -32,6 +32,7 @@ #include "drm.h" #include "i915_drm.h" #include "i915_drv.h" +#include "i915_trace.h" #include "intel_drv.h" #include @@ -432,36 +433,26 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) return 1; } -void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) +static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { - int count; - - count = 0; - while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) - udelay(10); + if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0, 500)) + DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE, 1); - POSTING_READ(FORCEWAKE); - count = 0; - while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0) - udelay(10); + if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1), 500)) + DRM_ERROR("Force wake wait timed out\n"); } -void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) +static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) { - int count; - - count = 0; - while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1)) - udelay(10); + if 
(wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0, 500)) + DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); - POSTING_READ(FORCEWAKE_MT); - count = 0; - while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0) - udelay(10); + if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1), 500)) + DRM_ERROR("Force wake wait timed out\n"); } /* @@ -476,7 +467,7 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) spin_lock_irqsave(&dev_priv->gt_lock, irqflags); if (dev_priv->forcewake_count++ == 0) - dev_priv->display.force_wake_get(dev_priv); + dev_priv->gt.force_wake_get(dev_priv); spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); } @@ -489,14 +480,14 @@ static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); } -void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) +static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE, 0); /* The below doubles as a POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); } -void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) +static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); /* The below doubles as a POSTING_READ */ @@ -512,7 +503,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) spin_lock_irqsave(&dev_priv->gt_lock, irqflags); if (--dev_priv->forcewake_count == 0) - dev_priv->display.force_wake_put(dev_priv); + dev_priv->gt.force_wake_put(dev_priv); spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); } @@ -536,12 +527,8 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) return ret; } -void vlv_force_wake_get(struct drm_i915_private *dev_priv) +static void vlv_force_wake_get(struct drm_i915_private *dev_priv) { - int count; - - count = 0; - /* Already awake? */ if ((I915_READ(0x130094) & 0xa1) == 0xa1) return; @@ -549,18 +536,58 @@ void vlv_force_wake_get(struct drm_i915_private *dev_priv) I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff); POSTING_READ(FORCEWAKE_VLV); - count = 0; - while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0) - udelay(10); + if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500)) + DRM_ERROR("Force wake wait timed out\n"); } -void vlv_force_wake_put(struct drm_i915_private *dev_priv) +static void vlv_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000); /* FIXME: confirm VLV behavior with Punit folks */ POSTING_READ(FORCEWAKE_VLV); } +void intel_gt_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + spin_lock_init(&dev_priv->gt_lock); + + if (IS_VALLEYVIEW(dev)) { + dev_priv->gt.force_wake_get = vlv_force_wake_get; + dev_priv->gt.force_wake_put = vlv_force_wake_put; + } else if (INTEL_INFO(dev)->gen >= 6) { + dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; + dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; + + /* IVB configs may use multi-threaded forcewake */ + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { + u32 ecobus; + + /* A small trick here - if the bios hasn't configured + * MT forcewake, and if the device is in RC6, then + * force_wake_mt_get will not wake the device and the + * ECOBUS read will return zero. Which will be + * (correctly) interpreted by the test below as MT + * forcewake being disabled. 
+ */ + mutex_lock(&dev->struct_mutex); + __gen6_gt_force_wake_mt_get(dev_priv); + ecobus = I915_READ_NOTRACE(ECOBUS); + __gen6_gt_force_wake_mt_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + + if (ecobus & FORCEWAKE_MT_ENABLE) { + DRM_DEBUG_KMS("Using MT version of forcewake\n"); + dev_priv->gt.force_wake_get = + __gen6_gt_force_wake_mt_get; + dev_priv->gt.force_wake_put = + __gen6_gt_force_wake_mt_put; + } + } + } +} + static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -797,9 +824,9 @@ static int gen6_do_reset(struct drm_device *dev) /* If reset with a user forcewake, try to restore, otherwise turn it off */ if (dev_priv->forcewake_count) - dev_priv->display.force_wake_get(dev_priv); + dev_priv->gt.force_wake_get(dev_priv); else - dev_priv->display.force_wake_put(dev_priv); + dev_priv->gt.force_wake_put(dev_priv); /* Restore fifo count */ dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); @@ -1248,10 +1275,10 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ unsigned long irqflags; \ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ if (dev_priv->forcewake_count == 0) \ - dev_priv->display.force_wake_get(dev_priv); \ + dev_priv->gt.force_wake_get(dev_priv); \ val = read##y(dev_priv->regs + reg); \ if (dev_priv->forcewake_count == 0) \ - dev_priv->display.force_wake_put(dev_priv); \ + dev_priv->gt.force_wake_put(dev_priv); \ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ val = read##y(dev_priv->regs + reg + 0x180000); \ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a0c15abbdcef..60f6974d20d4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -262,8 +262,6 @@ struct drm_i915_display_funcs { struct drm_i915_gem_object *obj); int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y); - void (*force_wake_get)(struct drm_i915_private *dev_priv); - void (*force_wake_put)(struct drm_i915_private *dev_priv); /* clock updates for mode set */ /* cursor updates */ /* render clock increase/decrease */ @@ -271,6 +269,11 @@ struct drm_i915_display_funcs { /* pll clock increase/decrease */ }; +struct drm_i915_gt_funcs { + void (*force_wake_get)(struct drm_i915_private *dev_priv); + void (*force_wake_put)(struct drm_i915_private *dev_priv); +}; + struct intel_device_info { u8 gen; u8 is_mobile:1; @@ -362,6 +365,8 @@ typedef struct drm_i915_private { int relative_constants_mode; void __iomem *regs; + + struct drm_i915_gt_funcs gt; /** gt_fifo_count and the subsequent register write are synchronized * with dev->struct_mutex. 
*/ unsigned gt_fifo_count; @@ -1200,6 +1205,7 @@ void i915_hangcheck_elapsed(unsigned long data); void i915_handle_error(struct drm_device *dev, bool wedged); extern void intel_irq_init(struct drm_device *dev); +extern void intel_gt_init(struct drm_device *dev); void i915_error_state_free(struct kref *error_ref); @@ -1517,13 +1523,6 @@ extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); extern int intel_enable_rc6(const struct drm_device *dev); extern bool i915_semaphore_is_enabled(struct drm_device *dev); -extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); -extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv); -extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); -extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv); - -extern void vlv_force_wake_get(struct drm_i915_private *dev_priv); -extern void vlv_force_wake_put(struct drm_i915_private *dev_priv); /* overlay */ #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3fbc802b4940..342f18e9cbe0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7013,9 +7013,6 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.write_eld = ironlake_write_eld; } else dev_priv->display.update_wm = NULL; - } else if (IS_VALLEYVIEW(dev)) { - dev_priv->display.force_wake_get = vlv_force_wake_get; - dev_priv->display.force_wake_put = vlv_force_wake_put; } else if (IS_G4X(dev)) { dev_priv->display.write_eld = g4x_write_eld; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 99bc1f33bfcb..ed9912ca1f82 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3765,34 +3765,6 @@ void intel_init_pm(struct drm_device *dev) /* For FIFO watermark updates */ if (HAS_PCH_SPLIT(dev)) { - dev_priv->display.force_wake_get = __gen6_gt_force_wake_get; - dev_priv->display.force_wake_put = __gen6_gt_force_wake_put; - - /* IVB configs may use multi-threaded forcewake */ - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { - u32 ecobus; - - /* A small trick here - if the bios hasn't configured MT forcewake, - * and if the device is in RC6, then force_wake_mt_get will not wake - * the device and the ECOBUS read will return zero. Which will be - * (correctly) interpreted by the test below as MT forcewake being - * disabled. 
- */ - mutex_lock(&dev->struct_mutex); - __gen6_gt_force_wake_mt_get(dev_priv); - ecobus = I915_READ_NOTRACE(ECOBUS); - __gen6_gt_force_wake_mt_put(dev_priv); - mutex_unlock(&dev->struct_mutex); - - if (ecobus & FORCEWAKE_MT_ENABLE) { - DRM_DEBUG_KMS("Using MT version of forcewake\n"); - dev_priv->display.force_wake_get = - __gen6_gt_force_wake_mt_get; - dev_priv->display.force_wake_put = - __gen6_gt_force_wake_mt_put; - } - } - if (HAS_PCH_IBX(dev)) dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; else if (HAS_PCH_CPT(dev)) @@ -3848,8 +3820,6 @@ void intel_init_pm(struct drm_device *dev) dev_priv->display.update_wm = valleyview_update_wm; dev_priv->display.init_clock_gating = valleyview_init_clock_gating; - dev_priv->display.force_wake_get = vlv_force_wake_get; - dev_priv->display.force_wake_put = vlv_force_wake_put; } else if (IS_PINEVIEW(dev)) { if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, -- cgit v1.2.3 From c4de7b0ffda2bb4843fd7f1052d0a2bb90bd08a5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 2 Jul 2012 11:51:03 -0300 Subject: drm/i915: Implement w/a for sporadic read failures on waking from rc6 As a w/a to prevent reads sporadically returning 0, we need to wait for the GT thread to return to TC0 before proceeding to read the registers. v2: adapt for Haswell changes (Eugeni). v3: use wait_for_atomic_us for thread status polling. v3: *really* use wait_for_atomic for polling. References: https://bugs.freedesktop.org/show_bug.cgi?id=50243 Signed-off-by: Chris Wilson Signed-off-by: Eugeni Dodonov Acked-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 22 ++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 4 ++++ 2 files changed, 26 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 928b6677759d..a4ea4a9fc425 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -433,6 +433,22 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) return 1; } +static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) +{ + u32 gt_thread_status_mask; + + if (IS_HASWELL(dev_priv->dev)) + gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW; + else + gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK; + + /* w/a for a sporadic read returning 0 by waiting for the GT + * thread to wake up. 
+ */ + if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) + DRM_ERROR("GT thread status wait timed out\n"); +} + static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0, 500)) @@ -442,6 +458,8 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1), 500)) DRM_ERROR("Force wake wait timed out\n"); + + __gen6_gt_wait_for_thread_c0(dev_priv); } static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) @@ -453,6 +471,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1), 500)) DRM_ERROR("Force wake wait timed out\n"); + + __gen6_gt_wait_for_thread_c0(dev_priv); } /* @@ -538,6 +558,8 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv) if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500)) DRM_ERROR("Force wake wait timed out\n"); + + __gen6_gt_wait_for_thread_c0(dev_priv); } static void vlv_force_wake_put(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ac65e96ea98e..8b400e96de10 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1458,6 +1458,10 @@ #define DDRMPLL1 0X12c20 #define PEG_BAND_GAP_DATA 0x14d68 +#define GEN6_GT_THREAD_STATUS_REG 0x13805c +#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 +#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16)) + #define GEN6_GT_PERF_STATUS 0x145948 #define GEN6_RP_STATE_LIMITS 0x145994 #define GEN6_RP_STATE_CAP 0x145998 -- cgit v1.2.3 From e7911c48a05bc0002616a51e99761dec36110b04 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Mon, 2 Jul 2012 11:51:04 -0300 Subject: drm/i915: support Haswell force waking There is a different ACK register for force wake on Haswell, so account for that. 
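Whichever ACK register a given platform uses, callers are not expected to touch these low-level get/put routines directly: they go through the reference-counted wrappers from earlier in this series so that nested users share a single wake request. A hedged usage sketch (gen6_gt_force_wake_get/put are the wrappers dispatched via the gt vtable; the register touched here is purely illustrative):

	/* Sketch: hold the GT awake across a multi-register sequence so it
	 * cannot power down between the read and the write.  The wrappers
	 * refcount the wake, so nested callers share one request.
	 */
	gen6_gt_force_wake_get(dev_priv);
	tmp = I915_READ(GEN6_RC_CONTROL);	/* illustrative register only */
	I915_WRITE(GEN6_RC_CONTROL, tmp | GEN6_RC_CTL_RC6_ENABLE);
	gen6_gt_force_wake_put(dev_priv);
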
Signed-off-by: Eugeni Dodonov Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 22 ++++++++++++++++++---- drivers/gpu/drm/i915/i915_reg.h | 1 + 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index a4ea4a9fc425..3ac414ff7f84 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -451,12 +451,19 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { - if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0, 500)) + u32 forcewake_ack; + + if (IS_HASWELL(dev_priv->dev)) + forcewake_ack = FORCEWAKE_ACK_HSW; + else + forcewake_ack = FORCEWAKE_ACK; + + if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE, 1); - if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1), 500)) + if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) DRM_ERROR("Force wake wait timed out\n"); __gen6_gt_wait_for_thread_c0(dev_priv); @@ -464,12 +471,19 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) { - if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0, 500)) + u32 forcewake_ack; + + if (IS_HASWELL(dev_priv->dev)) + forcewake_ack = FORCEWAKE_ACK_HSW; + else + forcewake_ack = FORCEWAKE_MT_ACK; + + if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); - if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1), 500)) + if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) DRM_ERROR("Force wake wait timed out\n"); __gen6_gt_wait_for_thread_c0(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8b400e96de10..40000841b39d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4071,6 +4071,7 @@ #define FORCEWAKE 0xA18C #define FORCEWAKE_VLV 0x1300b0 #define FORCEWAKE_ACK_VLV 0x1300b4 +#define FORCEWAKE_ACK_HSW 0x130044 #define FORCEWAKE_ACK 0x130090 #define FORCEWAKE_MT 0xa188 /* multi-threaded */ #define FORCEWAKE_MT_ACK 0x130040 -- cgit v1.2.3 From 5a7dc92a0b55ccaa6e342ec212657d6fc806e790 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Mon, 2 Jul 2012 11:51:05 -0300 Subject: drm/i915: add RPS configuration for Haswell Most of the RPS and RC6 enabling functionality is similar to what we had on Gen6/Gen7, so we preserve most of the registers. Note that Haswell only has RC6, so account for that as well. As suggested by Daniel Vetter, to reduce the amount of changes in the patch, we still write the RC6p/RC6pp thresholds, but those are ignored on Haswell. Note: Some discussion about the nature of the new tuning constants popped up in review - the answer is that we don't know why they've changed, but the guide from VPG with the magic numbers simply has different values now. v2: Squash fix for ?: vs | operation precende bug into this patch. Signed-off-by: Eugeni Dodonov Reviewed-by: Ben Widawsky [danvet: Added note to commit message. Squashed fix.] 
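The squashed v2 fix concerns C operator precedence: bitwise '|' binds tighter than the conditional operator, so omitting the parentheses around the ?: expression changes what gets OR'd together. A minimal illustration using the flags from this patch (sketch only, not part of the diff):

	/* Without parentheses the whole OR chain becomes the ?: condition:
	 *
	 *	GEN6_RP_UP_BUSY_AVG | IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG
	 *					      : GEN6_RP_DOWN_IDLE_CONT
	 *
	 * parses as (GEN6_RP_UP_BUSY_AVG | IS_HASWELL(dev)) ? ... : ...,
	 * silently dropping every other flag in the chain.  The
	 * parenthesized form used in the diff below keeps the selection
	 * local to the two idle-policy bits:
	 */
	GEN6_RP_UP_BUSY_AVG |
	(IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT);
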
Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 37 +++++++++++++++++++++++++------------ 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 40000841b39d..3be31a4cb8fb 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4132,6 +4132,7 @@ #define GEN6_RP_UP_IDLE_MIN (0x1<<3) #define GEN6_RP_UP_BUSY_AVG (0x2<<3) #define GEN6_RP_UP_BUSY_CONT (0x4<<3) +#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0) #define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) #define GEN6_RP_UP_THRESHOLD 0xA02C #define GEN6_RP_DOWN_THRESHOLD 0xA030 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ed9912ca1f82..4c6c26c5ad32 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2404,20 +2404,24 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ + /* Check if we are enabling RC6 */ rc6_mode = intel_enable_rc6(dev_priv->dev); if (rc6_mode & INTEL_RC6_ENABLE) rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; - if (rc6_mode & INTEL_RC6p_ENABLE) - rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; + /* We don't use those on Haswell */ + if (!IS_HASWELL(dev)) { + if (rc6_mode & INTEL_RC6p_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; - if (rc6_mode & INTEL_RC6pp_ENABLE) - rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; + if (rc6_mode & INTEL_RC6pp_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; + } DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", - (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off", - (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off", - (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off"); + (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", + (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", + (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); I915_WRITE(GEN6_RC_CONTROL, rc6_mask | @@ -2435,10 +2439,19 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, dev_priv->max_delay << 24 | dev_priv->min_delay << 16); - I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); - I915_WRITE(GEN6_RP_UP_EI, 100000); - I915_WRITE(GEN6_RP_DOWN_EI, 5000000); + + if (IS_HASWELL(dev)) { + I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); + I915_WRITE(GEN6_RP_UP_EI, 66000); + I915_WRITE(GEN6_RP_DOWN_EI, 350000); + } else { + I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); + I915_WRITE(GEN6_RP_UP_EI, 100000); + I915_WRITE(GEN6_RP_DOWN_EI, 5000000); + } + I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO | @@ -2446,7 +2459,7 @@ static void gen6_enable_rps(struct drm_device *dev) GEN6_RP_MEDIA_IS_GFX | GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_CONT); + (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT)); if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500)) -- cgit v1.2.3 From 7cf50fc8d7a491d9aa47e1a0262ed7d265f2bec3 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Mon, 2 Jul 2012 11:51:06 -0300 Subject: drm/i915: slightly improve gt enable/disable routines Just a cosmetic change to simplify the if statement. 
Signed-off-by: Eugeni Dodonov Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 4c6c26c5ad32..e4c7eac656bd 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3240,7 +3240,7 @@ void intel_disable_gt_powersave(struct drm_device *dev) { if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); - if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) + else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) gen6_disable_rps(dev); } @@ -3250,9 +3250,7 @@ void intel_enable_gt_powersave(struct drm_device *dev) ironlake_enable_drps(dev); ironlake_enable_rc6(dev); intel_init_emon(dev); - } - - if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { + } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { gen6_enable_rps(dev); gen6_update_ring_freq(dev); } -- cgit v1.2.3 From 4a637c2c83a26f496688a28e629a3c0acb8a7be5 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Mon, 2 Jul 2012 11:51:07 -0300 Subject: drm/i915: enable RC6 by default on Haswell It should be working so let's turn it on by default and catch any possible issues faster. Signed-off-by: Eugeni Dodonov Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index e4c7eac656bd..28db512b5784 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2334,9 +2334,11 @@ int intel_enable_rc6(const struct drm_device *dev) if (INTEL_INFO(dev)->gen == 5) return 0; - /* Sorry Haswell, no RC6 for you for now. */ + /* On Haswell, only RC6 is available. So let's enable it by default to + * provide better testing and coverage since the beginning. + */ if (IS_HASWELL(dev)) - return 0; + return INTEL_RC6_ENABLE; /* * Disable rc6 on Sandybridge -- cgit v1.2.3 From 885094845067dd3791b0f6c3497d51bca76e393a Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Mon, 2 Jul 2012 11:51:08 -0300 Subject: drm/i915: disable RC6 when disabling rps We weren't disabling RC6 bits when bringing down RPS. Signed-off-by: Eugeni Dodonov Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 28db512b5784..c06785ed3f43 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2305,6 +2305,7 @@ static void gen6_disable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN6_RPNSWREQ, 1 << 31); I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); I915_WRITE(GEN6_PMIER, 0); -- cgit v1.2.3 From cad2a2d7761238c0b9ff62eecc89215e6bd61831 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Mon, 2 Jul 2012 11:51:09 -0300 Subject: drm/i915: introduce haswell_init_clock_gating This is based on Ivy Bridge clock gating for now, but is subject to changes in the future. Note: Compared to the ivb clock gating this drops the the IDICOS medium uncore sharing tuned in commit 208482232de3590cee4757dfabe5d8cee8c6e626 Author: Ben Widawsky Date: Fri May 4 18:58:59 2012 -0700 drm/i915: set IDICOS to medium uncore resources Eugeni wants to benchmark the effect of this first. 
Signed-off-by: Eugeni Dodonov [danvet: added note] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 54 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 53 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index c06785ed3f43..3c2724e42975 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3417,6 +3417,58 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) I915_WRITE(GEN7_FF_THREAD_MODE, reg); } +static void haswell_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. + * This implements the WaDisableRCZUnitClockGating workaround. + */ + I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); + + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); + + I915_WRITE(IVB_CHICKEN3, + CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | + CHICKEN3_DGMG_DONE_FIX_DISABLE); + + /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ + I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, + GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); + + /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ + I915_WRITE(GEN7_L3CNTLREG1, + GEN7_WA_FOR_GEN7_L3_CONTROL); + I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, + GEN7_WA_L3_CHICKEN_MODE); + + /* This is required by WaCatErrorRejectionIssue */ + I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, + I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); + + for_each_pipe(pipe) { + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } + + gen7_setup_fixed_func_scheduler(dev_priv); + + /* WaDisable4x2SubspanOptimization */ + I915_WRITE(CACHE_MODE_1, + _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); +} + static void ivybridge_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3826,7 +3878,7 @@ void intel_init_pm(struct drm_device *dev) "Disable CxSR\n"); dev_priv->display.update_wm = NULL; } - dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; + dev_priv->display.init_clock_gating = haswell_init_clock_gating; dev_priv->display.sanitize_pm = gen6_sanitize_pm; } else dev_priv->display.update_wm = NULL; -- cgit v1.2.3 From 1544d9d57396d5c0c6b7644ed5ae1f4d6caad07a Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Mon, 2 Jul 2012 11:51:10 -0300 Subject: drm/i915: enable RC6 workaround on Haswell For Haswell, on some of the early hardware revisions, it is possible to run into issues when RC6 state is enabled and when pipes change state. v2: add comment saying that this is for early revisions only. v3: beautify as suggested by Daniel Vetter. 
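Since the issue is limited to early hardware revisions, the natural follow-up once a fixed stepping is identified would be to key the workaround off the PCI revision instead of applying it unconditionally. A purely hypothetical sketch (HSW_WM_DBG_FIXED_REV is an invented placeholder, not a real define; the WM_DBG bits are the ones added in the diff below):

	/* Hypothetical: only set the WM_DBG workaround bits on steppings
	 * known to be affected; later silicon would skip this entirely.
	 */
	if (IS_HASWELL(dev) && dev->pdev->revision < HSW_WM_DBG_FIXED_REV)
		I915_WRITE(WM_DBG, I915_READ(WM_DBG) |
			   WM_DBG_DISALLOW_MULTIPLE_LP |
			   WM_DBG_DISALLOW_SPRITE |
			   WM_DBG_DISALLOW_MAXFIFO);
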
Signed-off-by: Eugeni Dodonov Acked-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 5 +++++ drivers/gpu/drm/i915/intel_pm.c | 10 ++++++++++ 2 files changed, 15 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3be31a4cb8fb..4ddc62ecf839 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4453,4 +4453,9 @@ #define SFUSE_STRAP_DDIC_DETECTED (1<<1) #define SFUSE_STRAP_DDID_DETECTED (1<<0) +#define WM_DBG 0x45280 +#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0) +#define WM_DBG_DISALLOW_MAXFIFO (1<<1) +#define WM_DBG_DISALLOW_SPRITE (1<<2) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 3c2724e42975..6e02698e9a3d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3467,6 +3467,16 @@ static void haswell_init_clock_gating(struct drm_device *dev) /* WaDisable4x2SubspanOptimization */ I915_WRITE(CACHE_MODE_1, _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); + + /* XXX: This is a workaround for early silicon revisions and should be + * removed later. + */ + I915_WRITE(WM_DBG, + I915_READ(WM_DBG) | + WM_DBG_DISALLOW_MULTIPLE_LP | + WM_DBG_DISALLOW_SPRITE | + WM_DBG_DISALLOW_MAXFIFO); + } static void ivybridge_init_clock_gating(struct drm_device *dev) -- cgit v1.2.3 From 6590190d12442b94e83f4f4590f3bb5d2848dd07 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Mon, 2 Jul 2012 11:51:11 -0300 Subject: drm/i915: move force wake support into intel_pm This commit moves force wake support routines into intel_pm modules, and exports the gen6_gt_check_fifodbg routine (used in I915_READ). Signed-off-by: Eugeni Dodonov Acked-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 191 --------------------------------------- drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 191 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 192 insertions(+), 191 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3ac414ff7f84..c7e76e03a684 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -433,197 +433,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) return 1; } -static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) -{ - u32 gt_thread_status_mask; - - if (IS_HASWELL(dev_priv->dev)) - gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW; - else - gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK; - - /* w/a for a sporadic read returning 0 by waiting for the GT - * thread to wake up. 
- */ - if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) - DRM_ERROR("GT thread status wait timed out\n"); -} - -static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) -{ - u32 forcewake_ack; - - if (IS_HASWELL(dev_priv->dev)) - forcewake_ack = FORCEWAKE_ACK_HSW; - else - forcewake_ack = FORCEWAKE_ACK; - - if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) - DRM_ERROR("Force wake wait timed out\n"); - - I915_WRITE_NOTRACE(FORCEWAKE, 1); - - if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) - DRM_ERROR("Force wake wait timed out\n"); - - __gen6_gt_wait_for_thread_c0(dev_priv); -} - -static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) -{ - u32 forcewake_ack; - - if (IS_HASWELL(dev_priv->dev)) - forcewake_ack = FORCEWAKE_ACK_HSW; - else - forcewake_ack = FORCEWAKE_MT_ACK; - - if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) - DRM_ERROR("Force wake wait timed out\n"); - - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); - - if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) - DRM_ERROR("Force wake wait timed out\n"); - - __gen6_gt_wait_for_thread_c0(dev_priv); -} - -/* - * Generally this is called implicitly by the register read function. However, - * if some sequence requires the GT to not power down then this function should - * be called at the beginning of the sequence followed by a call to - * gen6_gt_force_wake_put() at the end of the sequence. - */ -void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) -{ - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); - if (dev_priv->forcewake_count++ == 0) - dev_priv->gt.force_wake_get(dev_priv); - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); -} - -static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) -{ - u32 gtfifodbg; - gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); - if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, - "MMIO read or write has been dropped %x\n", gtfifodbg)) - I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); -} - -static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE, 0); - /* The below doubles as a POSTING_READ */ - gen6_gt_check_fifodbg(dev_priv); -} - -static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); - /* The below doubles as a POSTING_READ */ - gen6_gt_check_fifodbg(dev_priv); -} - -/* - * see gen6_gt_force_wake_get() - */ -void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) -{ - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); - if (--dev_priv->forcewake_count == 0) - dev_priv->gt.force_wake_put(dev_priv); - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); -} - -int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) -{ - int ret = 0; - - if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { - int loop = 500; - u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); - while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { - udelay(10); - fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); - } - if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) - ++ret; - dev_priv->gt_fifo_count = fifo; - } - dev_priv->gt_fifo_count--; - - return ret; -} - -static void vlv_force_wake_get(struct drm_i915_private *dev_priv) -{ - /* Already awake? 
*/ - if ((I915_READ(0x130094) & 0xa1) == 0xa1) - return; - - I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff); - POSTING_READ(FORCEWAKE_VLV); - - if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500)) - DRM_ERROR("Force wake wait timed out\n"); - - __gen6_gt_wait_for_thread_c0(dev_priv); -} - -static void vlv_force_wake_put(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000); - /* FIXME: confirm VLV behavior with Punit folks */ - POSTING_READ(FORCEWAKE_VLV); -} - -void intel_gt_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - spin_lock_init(&dev_priv->gt_lock); - - if (IS_VALLEYVIEW(dev)) { - dev_priv->gt.force_wake_get = vlv_force_wake_get; - dev_priv->gt.force_wake_put = vlv_force_wake_put; - } else if (INTEL_INFO(dev)->gen >= 6) { - dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; - dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; - - /* IVB configs may use multi-threaded forcewake */ - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { - u32 ecobus; - - /* A small trick here - if the bios hasn't configured - * MT forcewake, and if the device is in RC6, then - * force_wake_mt_get will not wake the device and the - * ECOBUS read will return zero. Which will be - * (correctly) interpreted by the test below as MT - * forcewake being disabled. - */ - mutex_lock(&dev->struct_mutex); - __gen6_gt_force_wake_mt_get(dev_priv); - ecobus = I915_READ_NOTRACE(ECOBUS); - __gen6_gt_force_wake_mt_put(dev_priv); - mutex_unlock(&dev->struct_mutex); - - if (ecobus & FORCEWAKE_MT_ENABLE) { - DRM_DEBUG_KMS("Using MT version of forcewake\n"); - dev_priv->gt.force_wake_get = - __gen6_gt_force_wake_mt_get; - dev_priv->gt.force_wake_put = - __gen6_gt_force_wake_mt_put; - } - } - } -} - static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cc1573b2b60b..7d521aa51544 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -493,6 +493,7 @@ extern void intel_gpu_ips_teardown(void); extern void intel_enable_gt_powersave(struct drm_device *dev); extern void intel_disable_gt_powersave(struct drm_device *dev); +extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode); extern void intel_ddi_mode_set(struct drm_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 6e02698e9a3d..3f58d7aeb70a 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3948,3 +3948,194 @@ void intel_init_pm(struct drm_device *dev) intel_init_power_wells(dev); } +static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) +{ + u32 gt_thread_status_mask; + + if (IS_HASWELL(dev_priv->dev)) + gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW; + else + gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK; + + /* w/a for a sporadic read returning 0 by waiting for the GT + * thread to wake up. 
+ */ + if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) + DRM_ERROR("GT thread status wait timed out\n"); +} + +static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) +{ + u32 forcewake_ack; + + if (IS_HASWELL(dev_priv->dev)) + forcewake_ack = FORCEWAKE_ACK_HSW; + else + forcewake_ack = FORCEWAKE_ACK; + + if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) + DRM_ERROR("Force wake wait timed out\n"); + + I915_WRITE_NOTRACE(FORCEWAKE, 1); + + if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) + DRM_ERROR("Force wake wait timed out\n"); + + __gen6_gt_wait_for_thread_c0(dev_priv); +} + +static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) +{ + u32 forcewake_ack; + + if (IS_HASWELL(dev_priv->dev)) + forcewake_ack = FORCEWAKE_ACK_HSW; + else + forcewake_ack = FORCEWAKE_MT_ACK; + + if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) + DRM_ERROR("Force wake wait timed out\n"); + + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); + + if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) + DRM_ERROR("Force wake wait timed out\n"); + + __gen6_gt_wait_for_thread_c0(dev_priv); +} + +/* + * Generally this is called implicitly by the register read function. However, + * if some sequence requires the GT to not power down then this function should + * be called at the beginning of the sequence followed by a call to + * gen6_gt_force_wake_put() at the end of the sequence. + */ +void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) +{ + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); + if (dev_priv->forcewake_count++ == 0) + dev_priv->gt.force_wake_get(dev_priv); + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); +} + +void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) +{ + u32 gtfifodbg; + gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); + if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, + "MMIO read or write has been dropped %x\n", gtfifodbg)) + I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); +} + +static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE, 0); + /* The below doubles as a POSTING_READ */ + gen6_gt_check_fifodbg(dev_priv); +} + +static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); + /* The below doubles as a POSTING_READ */ + gen6_gt_check_fifodbg(dev_priv); +} + +/* + * see gen6_gt_force_wake_get() + */ +void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) +{ + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); + if (--dev_priv->forcewake_count == 0) + dev_priv->gt.force_wake_put(dev_priv); + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); +} + +int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) +{ + int ret = 0; + + if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { + int loop = 500; + u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { + udelay(10); + fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + } + if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) + ++ret; + dev_priv->gt_fifo_count = fifo; + } + dev_priv->gt_fifo_count--; + + return ret; +} + +static void vlv_force_wake_get(struct drm_i915_private *dev_priv) +{ + /* Already awake? 
*/ + if ((I915_READ(0x130094) & 0xa1) == 0xa1) + return; + + I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff); + POSTING_READ(FORCEWAKE_VLV); + + if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500)) + DRM_ERROR("Force wake wait timed out\n"); + + __gen6_gt_wait_for_thread_c0(dev_priv); +} + +static void vlv_force_wake_put(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000); + /* FIXME: confirm VLV behavior with Punit folks */ + POSTING_READ(FORCEWAKE_VLV); +} + +void intel_gt_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + spin_lock_init(&dev_priv->gt_lock); + + if (IS_VALLEYVIEW(dev)) { + dev_priv->gt.force_wake_get = vlv_force_wake_get; + dev_priv->gt.force_wake_put = vlv_force_wake_put; + } else if (INTEL_INFO(dev)->gen >= 6) { + dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; + dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; + + /* IVB configs may use multi-threaded forcewake */ + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { + u32 ecobus; + + /* A small trick here - if the bios hasn't configured + * MT forcewake, and if the device is in RC6, then + * force_wake_mt_get will not wake the device and the + * ECOBUS read will return zero. Which will be + * (correctly) interpreted by the test below as MT + * forcewake being disabled. + */ + mutex_lock(&dev->struct_mutex); + __gen6_gt_force_wake_mt_get(dev_priv); + ecobus = I915_READ_NOTRACE(ECOBUS); + __gen6_gt_force_wake_mt_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + + if (ecobus & FORCEWAKE_MT_ENABLE) { + DRM_DEBUG_KMS("Using MT version of forcewake\n"); + dev_priv->gt.force_wake_get = + __gen6_gt_force_wake_mt_get; + dev_priv->gt.force_wake_put = + __gen6_gt_force_wake_mt_put; + } + } + } +} + -- cgit v1.2.3 From 930ebb462422117e12b85bb5fd6548ed13d0afb5 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 29 Jun 2012 23:32:16 +0200 Subject: drm/i915: fix up ilk rc6 disabling confusion While creating the new enable/disable_gt_powersave functions in commit 8090c6b9daa04dda649ac0a2209601042abfb0a4 Author: Daniel Vetter Date: Sun Jun 24 16:42:32 2012 +0200 drm/i915: wrap up gt powersave enabling functions I've botched up the handling of ironlake_disable_rc6. Fix this up by calling it at the right place. Note though that ironlake_disable_rc6 does a bit more than just disabling rc6 - it also tears down all the allocated context objects. Hence we need to move intel_teardown_rc6 out and directly call it from intel_modeset_cleanup. Also properly mark ironlake_enable_rc6 as static and kill the un-used declaration in i915_drv.h. Note: In review a question popped out why disable_rc6 also tears down the backing object and why we should move that out - it's simply for consistency with gen6+ rps code, which does it that way. 
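For illustration, the resulting split looks roughly like this (a condensed sketch of the call layout in the diff below, not the literal code):

	/* Runtime disable: stop drps/rc6, but keep the rc6 context objects. */
	void intel_disable_gt_powersave(struct drm_device *dev)
	{
		if (IS_IRONLAKE_M(dev)) {
			ironlake_disable_drps(dev);
			ironlake_disable_rc6(dev);	/* registers only, no teardown */
		} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
			gen6_disable_rps(dev);
		}
	}

	/* Driver teardown: only now free the rc6 backing objects. */
	void intel_modeset_cleanup(struct drm_device *dev)
	{
		intel_disable_gt_powersave(dev);
		ironlake_teardown_rc6(dev);
		/* ... remaining modeset cleanup ... */
	}
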
Cc: Ben Widawsky Reviewed-by: Eugeni Dodonov Reviewed-by: Ben Widawsky Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/intel_display.c | 2 ++ drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 14 +++++++------- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 60f6974d20d4..5a529b69110f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1516,7 +1516,6 @@ extern bool intel_fbc_enabled(struct drm_device *dev); extern void intel_disable_fbc(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); extern void ironlake_init_pch_refclk(struct drm_device *dev); -extern void ironlake_enable_rc6(struct drm_device *dev); extern void gen6_set_rps(struct drm_device *dev, u8 val); extern void intel_detect_pch(struct drm_device *dev); extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 342f18e9cbe0..bd3366e755e1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7267,6 +7267,8 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_disable_gt_powersave(dev); + ironlake_teardown_rc6(dev); + if (IS_VALLEYVIEW(dev)) vlv_init_dpio(dev); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 7d521aa51544..bc6d6165e37e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -494,6 +494,7 @@ extern void intel_gpu_ips_teardown(void); extern void intel_enable_gt_powersave(struct drm_device *dev); extern void intel_disable_gt_powersave(struct drm_device *dev); extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); +extern void ironlake_teardown_rc6(struct drm_device *dev); extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode); extern void intel_ddi_mode_set(struct drm_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 3f58d7aeb70a..0ed02c102fc2 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2563,7 +2563,7 @@ static void gen6_update_ring_freq(struct drm_device *dev) } } -static void ironlake_teardown_rc6(struct drm_device *dev) +void ironlake_teardown_rc6(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2580,7 +2580,7 @@ static void ironlake_teardown_rc6(struct drm_device *dev) } } -void ironlake_disable_rc6(struct drm_device *dev) +static void ironlake_disable_rc6(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2596,8 +2596,6 @@ void ironlake_disable_rc6(struct drm_device *dev) I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); POSTING_READ(RSTDBYCTL); } - - ironlake_teardown_rc6(dev); } static int ironlake_setup_rc6(struct drm_device *dev) @@ -2619,7 +2617,7 @@ static int ironlake_setup_rc6(struct drm_device *dev) return 0; } -void ironlake_enable_rc6(struct drm_device *dev) +static void ironlake_enable_rc6(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; @@ -3241,10 +3239,12 @@ static void intel_init_emon(struct drm_device *dev) void intel_disable_gt_powersave(struct drm_device *dev) { - if (IS_IRONLAKE_M(dev)) + if (IS_IRONLAKE_M(dev)) { ironlake_disable_drps(dev); - else if (INTEL_INFO(dev)->gen >= 6 && 
!IS_VALLEYVIEW(dev)) + ironlake_disable_rc6(dev); + } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) { gen6_disable_rps(dev); + } } void intel_enable_gt_powersave(struct drm_device *dev) -- cgit v1.2.3 From 2514bc510d0c3aadcc5204056bb440fa36845147 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Thu, 21 Jun 2012 15:13:50 -0700 Subject: drm/i915: prefer wide & slow to fast & narrow in DP configs High frequency link configurations have the potential to cause trouble with long and/or cheap cables, so prefer slow and wide configurations instead. This patch has the potential to cause trouble for eDP configurations that lie about available lanes, so if we run into that we can make it conditional on eDP. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=45801 Tested-by: peter@colberg.org Signed-off-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 76a708029dcb..611080b32e40 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -733,8 +733,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); - for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { - for (clock = 0; clock <= max_clock; clock++) { + for (clock = 0; clock <= max_clock; clock++) { + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); if (mode_rate <= link_avail) { -- cgit v1.2.3 From f035083055555f8ab200f086c755d361830e3140 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 3 Jul 2012 18:48:16 -0300 Subject: drm/i915: add PCH_NONE to enum intel_pch And rely on the fact that it's 0 to assume that machines without a PCH will have PCH_NONE as dev_priv->pch_type. Just today I finally realized that HAS_PCH_IBX is true for machines without a PCH. IMHO this is totally counter-intuitive and I don't think it's a good idea to assume that we're going to check for HAS_PCH_IBX only after we check for HAS_PCH_SPLIT. I believe that in the future we'll have more PCH types and checks like: if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) will become more and more common. There's a good chance that we may break non-PCH machines by adding these checks in code that runs on all machines. I also believe that the HAS_PCH_SPLIT check will become less common as we add more and more different PCH types. We'll probably start replacing checks like: if (HAS_PCH_SPLIT(dev)) foo(); else bar(); with: if (HAS_PCH_NEW(dev)) baz(); else if (HAS_PCH_OLD(dev) || HAS_PCH_IBX(dev)) foo(); else bar(); and this may break gen 2/3/4. As far as we have investigated, this patch will affect the behavior of intel_hdmi_dpms and intel_dp_link_down on gen 4. In both functions the code inside the HAS_PCH_IBX check is for IBX-specific workarounds, so we should be safe. If we start bisecting gen 2/3/4 bugs to this commit we should consider replacing the HAS_PCH_IBX checks with something else. V2: Improve commit message, list possible side effects and solution. 
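The practical effect can be sketched as follows (enum values as in the diff below; the refclk check shown is the one touched later in this series):

	enum intel_pch {
		PCH_NONE = 0,	/* no PCH: a zeroed dev_priv->pch_type reads as this */
		PCH_IBX,	/* Ibexpeak PCH */
		PCH_CPT,	/* Cougarpoint PCH */
		PCH_LPT,	/* Lynxpoint PCH */
	};

	/* With PCH_NONE == 0, PCH-less machines no longer satisfy HAS_PCH_IBX(),
	 * so checks like this stay safe on gen 2/3/4: */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
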
Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5a529b69110f..57d05c798c4b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -336,6 +336,7 @@ enum no_fbc_reason { }; enum intel_pch { + PCH_NONE = 0, /* No PCH present */ PCH_IBX, /* Ibexpeak PCH */ PCH_CPT, /* Cougarpoint PCH */ PCH_LPT, /* Lynxpoint PCH */ -- cgit v1.2.3 From 45e6e3a1cd2f53fd5996e75b4029defed97ca585 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 3 Jul 2012 15:57:32 -0300 Subject: drm/i915: get rid of dev_priv->info->has_pch_split Previously we had has_pch_split to tell us whether we had a PCH or not and we also had dev_priv->pch_type to tell us which kind of PCH it was, but it could only be used if we were 100% sure we did have a PCH. Now that PCH_NONE was added to dev_priv->pch_type we don't need has_pch_split anymore: we can just check for pch_type != PCH_NONE. The HAS_PCH_{IBX,CPT,LPT} macros use dev_priv->pch_type, so they can only be called after intel_detect_pch. The HAS_PCH_SPLIT macro looks at dev_priv->info->has_pch_split, which is available earlier. Since the goal is to implement HAS_PCH_SPLIT using dev_priv->pch_type instead of dev_priv->info->has_pch_split, we need to make sure that intel_detect_pch is called before any calls to HAS_PCH_SPLIT are made. So we moved the intel_detect_pch call to an earlier stage. Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 5 +++-- drivers/gpu/drm/i915/i915_drv.c | 8 -------- drivers/gpu/drm/i915/i915_drv.h | 3 +-- 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 4dc76976c0d0..f64ef4b723fd 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1547,6 +1547,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_mtrrfree; } + /* This must be called before any calls to HAS_PCH_* */ + intel_detect_pch(dev); + intel_irq_init(dev); intel_gt_init(dev); @@ -1599,8 +1602,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) /* Start out suspended */ dev_priv->mm.suspended = 1; - intel_detect_pch(dev); - if (drm_core_check_feature(dev, DRIVER_MODESET)) { ret = i915_load_modeset_init(dev); if (ret < 0) { diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c7e76e03a684..f2c0100e0630 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -216,7 +216,6 @@ static const struct intel_device_info intel_ironlake_d_info = { .gen = 5, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, - .has_pch_split = 1, }; static const struct intel_device_info intel_ironlake_m_info = { @@ -224,7 +223,6 @@ static const struct intel_device_info intel_ironlake_m_info = { .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 1, .has_bsd_ring = 1, - .has_pch_split = 1, }; static const struct intel_device_info intel_sandybridge_d_info = { @@ -233,7 +231,6 @@ static const struct intel_device_info intel_sandybridge_d_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, - .has_pch_split = 1, .has_force_wake = 1, }; @@ -244,7 +241,6 @@ static const struct intel_device_info intel_sandybridge_m_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, - .has_pch_split = 1, .has_force_wake = 1, }; @@ -254,7 +250,6 @@ static const struct 
intel_device_info intel_ivybridge_d_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, - .has_pch_split = 1, .has_force_wake = 1, }; @@ -265,7 +260,6 @@ static const struct intel_device_info intel_ivybridge_m_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, - .has_pch_split = 1, .has_force_wake = 1, }; @@ -293,7 +287,6 @@ static const struct intel_device_info intel_haswell_d_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, - .has_pch_split = 1, .has_force_wake = 1, }; @@ -303,7 +296,6 @@ static const struct intel_device_info intel_haswell_m_info = { .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, - .has_pch_split = 1, .has_force_wake = 1, }; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 57d05c798c4b..d839e4c24d69 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -288,7 +288,6 @@ struct intel_device_info { u8 is_crestline:1; u8 is_ivybridge:1; u8 is_valleyview:1; - u8 has_pch_split:1; u8 has_force_wake:1; u8 is_haswell:1; u8 has_fbc:1; @@ -1121,13 +1120,13 @@ struct drm_i915_file_private { #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) -#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split) #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) +#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) -- cgit v1.2.3 From 40579abed0624ce1dd0e54da312566dcc5f0622a Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 3 Jul 2012 15:57:33 -0300 Subject: drm/i915: don't ironlake_init_pch_refclk() on LPT This function is used to set the PCH_DREF_CONTROL register, which does not exist on LPT anymore. Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f2c0100e0630..6edb2d5ec13c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -504,7 +504,7 @@ static int i915_drm_thaw(struct drm_device *dev) /* KMS EnterVT equivalent */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { - if (HAS_PCH_SPLIT(dev)) + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) ironlake_init_pch_refclk(dev); mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index bd3366e755e1..b5ee440cd621 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6859,7 +6859,7 @@ static void intel_setup_outputs(struct drm_device *dev) /* disable all the possible outputs/crtcs before entering KMS mode */ drm_helper_disable_unused_functions(dev); - if (HAS_PCH_SPLIT(dev)) + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) ironlake_init_pch_refclk(dev); } -- cgit v1.2.3 From a8f78b582236a7e518ebcad8161b5599d3c92f5b Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 28 Jun 2012 15:55:35 -0300 Subject: drm/i915: re-initialize DDI buffer translations after resume This is necessary for the modesetting to work correctly after a suspend-resume cycle. 
Without this, the pipes and clocks got the correct configuration, but the underlying DDI buffers configuration was lost. Signed-off-by: Eugeni Dodonov Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b5ee440cd621..4d84fcdd413f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7169,6 +7169,8 @@ static void ivb_pch_pwm_override(struct drm_device *dev) void intel_modeset_init_hw(struct drm_device *dev) { + intel_prepare_ddi(dev); + intel_init_clock_gating(dev); mutex_lock(&dev->struct_mutex); @@ -7198,8 +7200,6 @@ void intel_modeset_init(struct drm_device *dev) intel_init_pm(dev); - intel_prepare_ddi(dev); - intel_init_display(dev); if (IS_GEN2(dev)) { -- cgit v1.2.3 From 4c243e25739252ce08718f519a2d567fd1d91ff0 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Thu, 28 Jun 2012 17:11:43 -0300 Subject: drm/i915: prevent bogus intel_update_fbc notifications This pollutes dmesg output even if we do not have FBC for the device, so move the DRM_DEBUG_KMS statement lower. v2: just kill the message as suggested by Daniel. Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0ed02c102fc2..0bb69fd255a9 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -387,8 +387,6 @@ void intel_update_fbc(struct drm_device *dev) struct drm_i915_gem_object *obj; int enable_fbc; - DRM_DEBUG_KMS("\n"); - if (!i915_powersave) return; -- cgit v1.2.3 From 4c3c115a1eaea5c699caa49e824df6e5b48396a4 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 29 Jun 2012 15:39:32 -0300 Subject: drm/i915: fix PIPE_DDI_PORT_MASK Only bits 30:28, bit 31 is PIPE_DDI_FUNC_ENABLE. Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4ddc62ecf839..1bd17e7dd479 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4283,7 +4283,7 @@ PIPE_DDI_FUNC_CTL_B) #define PIPE_DDI_FUNC_ENABLE (1<<31) /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ -#define PIPE_DDI_PORT_MASK (0xf<<28) +#define PIPE_DDI_PORT_MASK (7<<28) #define PIPE_DDI_SELECT_PORT(x) ((x)<<28) #define PIPE_DDI_MODE_SELECT_HDMI (0<<24) #define PIPE_DDI_MODE_SELECT_DVI (1<<24) -- cgit v1.2.3 From d54a02c041ccfdcfe3efcd1e5b90c6e8d5e7a8d9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 4 Jul 2012 22:18:39 +0200 Subject: drm/i915: don't trylock in the gpu reset code Simply failing to reset the gpu because someone else might still hold the mutex isn't a great idea - I see reliable silent reset failures. And gpu reset simply needs to be reliable and Just Work. "But ... the deadlocks!" We already kick all processes waiting for the gpu before launching the reset work item. New waiters need to check the wedging state anyway and then bail out. If we have places that can deadlock, we simply need to fix them. "But ... testing!" We have the gpu hangman, and if the current gpu load gem_exec_nop isn't good enough to hit a specific case, we can add a new one. "But ... 
don't we return -EAGAIN for non-interruptible calls to wait_seqno now?" Yep, but this problem already exists in the current code. A follow up patch will remedy this by returning -EIO for non-interruptible sleeps if the gpu died and the low-level wait bails out with -EAGAIN. Reviewed-by: Chris Wilson Tested-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6edb2d5ec13c..e754cdfaec79 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -730,8 +730,7 @@ int i915_reset(struct drm_device *dev) if (!i915_try_reset) return 0; - if (!mutex_trylock(&dev->struct_mutex)) - return -EBUSY; + mutex_lock(&dev->struct_mutex); i915_gem_reset(dev); -- cgit v1.2.3 From d6b2c790a4742a69624e6884b48e5d72f275abd0 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 4 Jul 2012 22:54:13 +0200 Subject: drm/i915: non-interruptible sleeps can't handle -EAGAIN So don't return -EAGAIN, even in the case of a gpu hang. Remap it to -EIO instead. Note that this isn't really an issue with interruptability, but more that we have quite a few codepaths (mostly around kms stuff) that simply can't handle any errors and hence not even -EAGAIN. Instead of adding proper failure paths so that we could restart these ioctls we've opted for the cheap way out of sleeping non-interruptibly. Which works everywhere but when the gpu dies, which this patch fixes. So essentially interruptible == false means 'wait for the gpu or die trying'.' This patch is a bit ugly because intel_ring_begin is all non-interruptible and hence only returns -EIO. But as the comment in there says, auditing all the callsites would be a pain. To avoid duplicating code, reuse i915_gem_check_wedge in __wait_seqno and intel_wait_ring_buffer. Also use the opportunity to clarify the different cases in i915_gem_check_wedge a bit with comments. v2: Don't access dev_priv->mm.interruptible from check_wedge - we might not hold dev->struct_mutex, making this racy. Instead pass interruptible in as a parameter. I've noticed this because I've hit a BUG_ON(!mutex_is_locked) at the top of check_wedge. This has been added in commit b4aca0106c466b5a0329318203f65bac2d91b682 Author: Ben Widawsky Date: Wed Apr 25 20:50:12 2012 -0700 drm/i915: extract some common olr+wedge code although that commit is missing any justification for this. I guess it's just copy&paste, because the same commit add the same BUG_ON check to check_olr, where it indeed makes sense. But in check_wedge everything we access is protected by other means, so this is superflous. And because it now gets in the way (we add a new caller in __wait_seqno, which can be called without dev->struct_mutext) let's just remove it. v3: Group all the i915_gem_check_wedge refactoring into this patch, so that this patch here is all about not returning -EAGAIN to callsites that can't handle syscall restarting. v4: Add clarification what interuptible == fales means in our code, requested by Ben Widawsky. v5: Fix EAGAIN mispell noticed by Chris Wilson. 
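In short, the error mapping is (a condensed paraphrase of i915_gem_check_wedge from the diff below; the real code inspects the error_completion under its wait-queue lock rather than via completion_done()):

	int i915_gem_check_wedge(struct drm_i915_private *dev_priv,
				 bool interruptible)
	{
		if (!atomic_read(&dev_priv->mm.wedged))
			return 0;		/* gpu is healthy */

		if (!interruptible)
			return -EIO;		/* caller can't restart the syscall */

		if (completion_done(&dev_priv->error_completion))
			return -EIO;		/* reset finished but gpu stayed wedged */

		return -EAGAIN;			/* reset still running: retry later */
	}
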
Reviewed-by: Ben Widawsky Reviewed-by: Chris Wilson Tested-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_gem.c | 26 ++++++++++++++++++-------- drivers/gpu/drm/i915/intel_ringbuffer.c | 6 ++++-- 3 files changed, 24 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d839e4c24d69..6c3a0bb91851 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1336,6 +1336,8 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); +int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv, + bool interruptible); void i915_gem_reset(struct drm_device *dev); void i915_gem_clflush_object(struct drm_i915_gem_object *obj); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6a98c0659324..af6a510367af 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1863,11 +1863,10 @@ i915_gem_retire_work_handler(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -static int -i915_gem_check_wedge(struct drm_i915_private *dev_priv) +int +i915_gem_check_wedge(struct drm_i915_private *dev_priv, + bool interruptible) { - BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); - if (atomic_read(&dev_priv->mm.wedged)) { struct completion *x = &dev_priv->error_completion; bool recovery_complete; @@ -1878,7 +1877,16 @@ i915_gem_check_wedge(struct drm_i915_private *dev_priv) recovery_complete = x->done > 0; spin_unlock_irqrestore(&x->wait.lock, flags); - return recovery_complete ? -EIO : -EAGAIN; + /* Non-interruptible callers can't handle -EAGAIN, hence return + * -EIO unconditionally for these. */ + if (!interruptible) + return -EIO; + + /* Recovery complete, but still wedged means reset failure. 
*/ + if (recovery_complete) + return -EIO; + + return -EAGAIN; } return 0; @@ -1932,6 +1940,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, unsigned long timeout_jiffies; long end; bool wait_forever = true; + int ret; if (i915_seqno_passed(ring->get_seqno(ring), seqno)) return 0; @@ -1963,8 +1972,9 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, end = wait_event_timeout(ring->irq_queue, EXIT_COND, timeout_jiffies); - if (atomic_read(&dev_priv->mm.wedged)) - end = -EAGAIN; + ret = i915_gem_check_wedge(dev_priv, interruptible); + if (ret) + end = ret; } while (end == 0 && wait_forever); getrawmonotonic(&now); @@ -2004,7 +2014,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) BUG_ON(seqno == 0); - ret = i915_gem_check_wedge(dev_priv); + ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index dce4d1a492a8..cd35ad4922de 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1228,8 +1228,10 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) } msleep(1); - if (atomic_read(&dev_priv->mm.wedged)) - return -EAGAIN; + + ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); + if (ret) + return ret; } while (!time_after(jiffies, end)); trace_i915_ring_wait_end(ring); return -EBUSY; -- cgit v1.2.3 From 0a6759c6bacb3998e3d9a7cf690177051238ad87 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 4 Jul 2012 22:18:41 +0200 Subject: drm/i915: don't hang userspace when the gpu reset is stuck With the gpu reset no longer using a trylock we've increased the chances of userspace getting stuck quite a bit. To make that (hopefully) rare case more paletable time out when waiting for the gpu reset code to complete and signal this little issue to the caller by returning -EIO. This should help userspace to somewhat gracefully fall back and hopefully allow the user to grab some logs and reboot the machine (instead of staring at a frozen X screen in agony). Suggested by Chris Wilson because I've been stubborn about allowing the gpu reset code no to fail, ever (by removing the trylock). Reviewed-by: Chris Wilson Tested-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index af6a510367af..7d285554333f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -96,9 +96,18 @@ i915_gem_wait_for_error(struct drm_device *dev) if (!atomic_read(&dev_priv->mm.wedged)) return 0; - ret = wait_for_completion_interruptible(x); - if (ret) + /* + * Only wait 10 seconds for the gpu reset to complete to avoid hanging + * userspace. If it takes that long something really bad is going on and + * we should simply try to bail out and fail as gracefully as possible. + */ + ret = wait_for_completion_interruptible_timeout(x, 10*HZ); + if (ret == 0) { + DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); + return -EIO; + } else if (ret < 0) { return ret; + } if (atomic_read(&dev_priv->mm.wedged)) { /* GPU is hung, bump the completion count to account for -- cgit v1.2.3 From a9340ccab547f24e3b398b7e3ebd792827ff1be1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 4 Jul 2012 22:18:42 +0200 Subject: drm/i915: properly SIGBUS on I/O errors ... 
instead of looping endlessly with no hope of ever serving that page-fault. We only need to break out of this loop when the gpu died, to run the reset work (and hopefully resurrect it). To clarify questions Chris raised on irc: This is about handling I/O errors not from our own code, but e.g. when the disk died when trying to swap in a gem bo. So this patch remedies the issue that the current handling only handles gpu-death-induced cases of -EIO. Admittedly, dying disks are much rarer than hanging gpus ... This seems to have been lost in: commit d9bc7e9f32716901c617e1f0fb6ce0f74f172686 Author: Chris Wilson Date: Mon Feb 7 13:09:31 2011 +0000 drm/i915: Fix infinite loop regression from 21dd3734 Reviewed-by: Chris Wilson Tested-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7d285554333f..2b54142a46ed 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1141,6 +1141,11 @@ unlock: out: switch (ret) { case -EIO: + /* If this -EIO is due to a gpu hang, give the reset code a + * chance to clean up the mess. Otherwise return the proper + * SIGBUS. */ + if (!atomic_read(&dev_priv->mm.wedged)) + return VM_FAULT_SIGBUS; case -EAGAIN: /* Give the error handler a chance to run and move the * objects off the GPU active list. Next time we service the -- cgit v1.2.3 From de2b998552c1534e87bfbc51ec5734b02bc89020 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 4 Jul 2012 22:52:50 +0200 Subject: drm/i915: don't return a spurious -EIO from intel_ring_begin The issue with this check is that it results in userspace receiving an -EIO while the gpu reset hasn't completed, resulting in fallback to sw rendering or worse. Now there's also a stern comment in intel_ring_wait_seqno saying that intel_ring_begin should not return -EAGAIN, ever, because some callers can't handle that. But after an audit of the callsites I don't see any issues. I guess the last problematic spot disappeared with the removal of the pipelined fencing code. So do the right thing and call check_wedge, which should properly decide whether an -EAGAIN or -EIO is appropriate if wedged is set. Note that the early check for a wedged gpu before touching the ring is rather important (and it took me quite some time of acting like the densest doofus to figure that out): If we don't do that and the gpu died for good, not having been resurrected by the reset code, userspace can merrily fill up the entire ring until it notices that something is amiss. Allowing userspace to emit more rendering, despite knowing that it will fail, can't lead to anything good (and by experience can lead to all sorts of havoc, including angering the OOM gods and hard-hanging the hw for good). v2: Fix EAGAIN misspelling, noticed by Chris Wilson.
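The resulting entry check in intel_ring_begin then looks roughly like this (condensed from the diff below):

	int intel_ring_begin(struct intel_ring_buffer *ring, int num_dwords)
	{
		drm_i915_private_t *dev_priv = ring->dev->dev_private;
		int ret;

		/* Check for a wedged gpu before touching the ring, so a dead gpu
		 * can't be filled with further rendering by userspace. */
		ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
		if (ret)
			return ret;	/* -EAGAIN while resetting, -EIO if wedged for good */

		/* ... wrap and wait for ring space as before ... */
		return 0;
	}
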
Reviewed-by: Chris Wilson Tested-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cd35ad4922de..d42d821c64d6 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1117,20 +1117,9 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; - bool was_interruptible; int ret; - /* XXX As we have not yet audited all the paths to check that - * they are ready for ERESTARTSYS from intel_ring_begin, do not - * allow us to be interruptible by a signal. - */ - was_interruptible = dev_priv->mm.interruptible; - dev_priv->mm.interruptible = false; - ret = i915_wait_seqno(ring, seqno); - - dev_priv->mm.interruptible = was_interruptible; if (!ret) i915_gem_retire_requests_ring(ring); @@ -1240,12 +1229,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) int intel_ring_begin(struct intel_ring_buffer *ring, int num_dwords) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + drm_i915_private_t *dev_priv = ring->dev->dev_private; int n = 4*num_dwords; int ret; - if (unlikely(atomic_read(&dev_priv->mm.wedged))) - return -EIO; + ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); + if (ret) + return ret; if (unlikely(ring->tail + n > ring->effective_size)) { ret = intel_wrap_ring_buffer(ring); -- cgit v1.2.3 From 3a7f2f6a9e43f2c5ce45d4ca19c43bf61df3f0dd Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 24 May 2012 21:08:56 +0300 Subject: drm/i915: Zero initialize mode_cmd MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Zero initialize the mode_cmd structure when creating the kernel framebuffer. Avoids having uninitialized data in offsets[0] for instance. Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_fb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index e9f8338bd802..97f673523b97 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -65,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, struct drm_i915_private *dev_priv = dev->dev_private; struct fb_info *info; struct drm_framebuffer *fb; - struct drm_mode_fb_cmd2 mode_cmd; + struct drm_mode_fb_cmd2 mode_cmd = {}; struct drm_i915_gem_object *obj; struct device *device = &dev->pdev->dev; int size, ret; -- cgit v1.2.3 From e6a595d2db7ad403fcb2b7b168754d7551fc8c9b Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 24 May 2012 21:08:59 +0300 Subject: drm/i915: Reject page flips with changed format/offset/pitch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit MI display flips can't handle some changes in the framebuffer format or layout. Return an error in such cases. 
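The new sanity checks at the top of the pageflip path boil down to (condensed from the diff below):

	/* MI display flips can't change the pixel format. */
	if (fb->pixel_format != crtc->fb->pixel_format)
		return -EINVAL;

	/* On gen4+ the TILEOFF/LINOFF registers (and hence offsets/pitch)
	 * can't be updated via a MI flip either. */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->fb->offsets[0] ||
	     fb->pitches[0] != crtc->fb->pitches[0]))
		return -EINVAL;
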
Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4d84fcdd413f..7e31f7af0de8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6438,6 +6438,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, unsigned long flags; int ret; + /* Can't change pixel format via MI display flips. */ + if (fb->pixel_format != crtc->fb->pixel_format) + return -EINVAL; + + /* + * TILEOFF/LINOFF registers can't be changed via MI display flips. + * Note that pitch changes could also affect these register. + */ + if (INTEL_INFO(dev)->gen > 3 && + (fb->offsets[0] != crtc->fb->offsets[0] || + fb->pitches[0] != crtc->fb->pitches[0])) + return -EINVAL; + work = kzalloc(sizeof *work, GFP_KERNEL); if (work == NULL) return -ENOMEM; -- cgit v1.2.3 From e506a0c6381f180858d2e343c3ed5c0bde8e84ba Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 5 Jul 2012 12:17:29 +0200 Subject: drm/i915: introduce crtc->dspaddr_offset To avoid recomputing the display framebuffer offset on gen2/3 pageflips. This is also prep work to do similar trickery on gen4+ Also: - kill "Start", such upper-case remnants from the ddx must surely die. - rename "Offset" to linear_offset, to make it clearer that on gen4+ this is only used by the hw for linear buffers, for tiled buffers it uses the TILEOFF register. - call DSAPADDR DSPLINOFF on gen4+ for the same reason (and because the documentation really renamed the register). Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_display.c | 46 ++++++++++++++++-------------------- drivers/gpu/drm/i915/intel_drv.h | 5 ++++ 3 files changed, 27 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1bd17e7dd479..4a2ea42b5ecf 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2979,6 +2979,7 @@ #define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE) #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) +#define DSPLINOFF(plane) DSPADDR(plane) /* Display/Sprite base address macros */ #define DISP_BASEADDR_MASK (0xfffff000) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 7e31f7af0de8..f8c24123dde0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1982,7 +1982,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj; int plane = intel_crtc->plane; - unsigned long Start, Offset; + unsigned long linear_offset; u32 dspcntr; u32 reg; @@ -2029,18 +2029,22 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, I915_WRITE(reg, dspcntr); - Start = obj->gtt_offset; - Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); + linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); - DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", - Start, Offset, x, y, fb->pitches[0]); + if (INTEL_INFO(dev)->gen >= 4) + intel_crtc->dspaddr_offset = 0; + else + intel_crtc->dspaddr_offset = linear_offset; + + DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", + obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); 
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); if (INTEL_INFO(dev)->gen >= 4) { - I915_MODIFY_DISPBASE(DSPSURF(plane), Start); + I915_MODIFY_DISPBASE(DSPSURF(plane), obj->gtt_offset); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); - I915_WRITE(DSPADDR(plane), Offset); + I915_WRITE(DSPLINOFF(plane), linear_offset); } else - I915_WRITE(DSPADDR(plane), Start + Offset); + I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset); POSTING_READ(reg); return 0; @@ -2055,7 +2059,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc, struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj; int plane = intel_crtc->plane; - unsigned long Start, Offset; + unsigned long linear_offset; u32 dspcntr; u32 reg; @@ -2110,15 +2114,15 @@ static int ironlake_update_plane(struct drm_crtc *crtc, I915_WRITE(reg, dspcntr); - Start = obj->gtt_offset; - Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); + linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); + intel_crtc->dspaddr_offset = 0; - DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", - Start, Offset, x, y, fb->pitches[0]); + DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", + obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); - I915_MODIFY_DISPBASE(DSPSURF(plane), Start); + I915_MODIFY_DISPBASE(DSPSURF(plane), obj->gtt_offset); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); - I915_WRITE(DSPADDR(plane), Offset); + I915_WRITE(DSPLINOFF(plane), linear_offset); POSTING_READ(reg); return 0; @@ -6194,7 +6198,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - unsigned long offset; u32 flip_mask; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; int ret; @@ -6203,9 +6206,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev, if (ret) goto err; - /* Offset into the new buffer for cases of shared fbs between CRTCs */ - offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; - ret = intel_ring_begin(ring, 6); if (ret) goto err_unpin; @@ -6222,7 +6222,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); - intel_ring_emit(ring, obj->gtt_offset + offset); + intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); intel_ring_emit(ring, 0); /* aux display base address, unused */ intel_ring_advance(ring); return 0; @@ -6240,7 +6240,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - unsigned long offset; u32 flip_mask; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; int ret; @@ -6249,9 +6248,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev, if (ret) goto err; - /* Offset into the new buffer for cases of shared fbs between CRTCs */ - offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; - ret = intel_ring_begin(ring, 6); if (ret) goto err_unpin; @@ -6265,7 +6261,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); - intel_ring_emit(ring, obj->gtt_offset + offset); + intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); diff --git 
a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index bc6d6165e37e..b7859e7110a7 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -177,6 +177,11 @@ struct intel_crtc { struct intel_unpin_work *unpin_work; int fdi_lanes; + /* Display surface base address adjustement for pageflips. Note that on + * gen4+ this only adjusts up to a tile, offsets within a tile are + * handled in the hw itself (with the TILEOFF register). */ + unsigned long dspaddr_offset; + struct drm_i915_gem_object *cursor_bo; uint32_t cursor_addr; int16_t cursor_x, cursor_y; -- cgit v1.2.3 From c2c75131244507c93f812862fdbd4f3a37139401 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 5 Jul 2012 12:17:30 +0200 Subject: drm/i915: adjust framebuffer base address on gen4+ The tileoffset register only supports a limited offset in x/y of 4096, so for giant screen configuration with a shared fb we wrap around. Fix this by computing a linear offset in tiles (pages) and only use the tileoffset register to offset within the tile. Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 2 +- drivers/gpu/drm/i915/intel_display.c | 47 +++++++++++++++++++++++++++++------- 2 files changed, 39 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4a2ea42b5ecf..da7484ec3bfb 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2986,7 +2986,7 @@ #define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK) #define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) #define I915_MODIFY_DISPBASE(reg, gfx_addr) \ - (I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg)))) + (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg)))) /* VBIOS flags */ #define SWF00 0x71410 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f8c24123dde0..ca1068bc892f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1973,6 +1973,22 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) i915_gem_object_unpin(obj); } +/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel + * is assumed to be a power-of-two. 
*/ +static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y, + unsigned int bpp, + unsigned int pitch) +{ + int tile_rows, tiles; + + tile_rows = *y / 8; + *y %= 8; + tiles = *x / (512/bpp); + *x %= 512/bpp; + + return tile_rows * pitch * 8 + tiles * 4096; +} + static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y) { @@ -2031,16 +2047,22 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); - if (INTEL_INFO(dev)->gen >= 4) - intel_crtc->dspaddr_offset = 0; - else + if (INTEL_INFO(dev)->gen >= 4) { + intel_crtc->dspaddr_offset = + gen4_compute_dspaddr_offset_xtiled(&x, &y, + fb->bits_per_pixel / 8, + fb->pitches[0]); + linear_offset -= intel_crtc->dspaddr_offset; + } else { intel_crtc->dspaddr_offset = linear_offset; + } DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); if (INTEL_INFO(dev)->gen >= 4) { - I915_MODIFY_DISPBASE(DSPSURF(plane), obj->gtt_offset); + I915_MODIFY_DISPBASE(DSPSURF(plane), + obj->gtt_offset + intel_crtc->dspaddr_offset); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPLINOFF(plane), linear_offset); } else @@ -2115,12 +2137,17 @@ static int ironlake_update_plane(struct drm_crtc *crtc, I915_WRITE(reg, dspcntr); linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); - intel_crtc->dspaddr_offset = 0; + intel_crtc->dspaddr_offset = + gen4_compute_dspaddr_offset_xtiled(&x, &y, + fb->bits_per_pixel / 8, + fb->pitches[0]); + linear_offset -= intel_crtc->dspaddr_offset; DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); - I915_MODIFY_DISPBASE(DSPSURF(plane), obj->gtt_offset); + I915_MODIFY_DISPBASE(DSPSURF(plane), + obj->gtt_offset + intel_crtc->dspaddr_offset); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPLINOFF(plane), linear_offset); POSTING_READ(reg); @@ -6299,7 +6326,9 @@ static int intel_gen4_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); - intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode); + intel_ring_emit(ring, + (obj->gtt_offset + intel_crtc->dspaddr_offset) | + obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far * untested on non-native modes, so ignore it for now. 
@@ -6339,7 +6368,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); - intel_ring_emit(ring, obj->gtt_offset); + intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); /* Contrary to the suggestions in the documentation, * "Enable Panel Fitter" does not seem to be required when page @@ -6402,7 +6431,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); - intel_ring_emit(ring, (obj->gtt_offset)); + intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); intel_ring_emit(ring, (MI_NOOP)); intel_ring_advance(ring); return 0; -- cgit v1.2.3 From 6c2b7c1208b762abc0df318ae53d18d9e5414e1b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 5 Jul 2012 09:50:24 +0200 Subject: drm/i915: introduce for_each_encoder_on_crtc We already have this pattern at quite a few places, and moving part of the modeset helper stuff into the driver will add more. v2: Don't clobber the crtc struct name with the macro parameter ... v3: Convert two more places noticed by Paulo Zanoni. Reviewed-by: Paulo Zanoni Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 4 ++++ drivers/gpu/drm/i915/intel_display.c | 38 +++++++++--------------------------- drivers/gpu/drm/i915/intel_dp.c | 22 ++++++--------------- drivers/gpu/drm/i915/intel_lvds.c | 6 +++--- drivers/gpu/drm/i915/intel_tv.c | 10 +++------- 5 files changed, 25 insertions(+), 55 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6c3a0bb91851..476c64c4844c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -79,6 +79,10 @@ enum port { #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) +#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ + list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ + if ((intel_encoder)->base.crtc == (__crtc)) + struct intel_pch_pll { int refcount; /* count of number of CRTCs sharing this PLL */ int active; /* count of number of active CRTCs (i.e. DPMS on) */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ca1068bc892f..0972f49e15d8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -627,11 +627,10 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock bool intel_pipe_has_type(struct drm_crtc *crtc, int type) { struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; - list_for_each_entry(encoder, &mode_config->encoder_list, base.head) - if (encoder->base.crtc == crtc && encoder->type == type) + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->type == type) return true; return false; @@ -2836,16 +2835,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) static bool intel_crtc_driving_pch(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; /* * If there's a non-PCH eDP on this crtc, it must be DP_A, and that * must be driven by its own crtc; no sharing is possible. 
*/ - list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { - if (encoder->base.crtc != crtc) - continue; + for_each_encoder_on_crtc(dev, crtc, encoder) { /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell * CPU handles all others */ @@ -3734,16 +3730,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_encoder *encoder; struct drm_connector *connector; + struct intel_encoder *intel_encoder; unsigned int display_bpc = UINT_MAX, bpc; /* Walk the encoders & connectors on this crtc, get min bpc */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - - if (encoder->crtc != crtc) - continue; + for_each_encoder_on_crtc(dev, crtc, intel_encoder) { if (intel_encoder->type == INTEL_OUTPUT_LVDS) { unsigned int lvds_bpc; @@ -3775,7 +3767,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, /* Not one of the known troublemakers, check the EDID */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder != encoder) + if (connector->encoder != &intel_encoder->base) continue; /* Don't use an invalid EDID bpc value */ @@ -4244,15 +4236,11 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, u32 dspcntr, pipeconf, vsyncshift; bool ok, has_reduced_clock = false, is_sdvo = false; bool is_lvds = false, is_tv = false, is_dp = false; - struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; const intel_limit_t *limit; int ret; - list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { - if (encoder->base.crtc != crtc) - continue; - + for_each_encoder_on_crtc(dev, crtc, encoder) { switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; @@ -4555,15 +4543,11 @@ static int ironlake_get_refclk(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; - struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *edp_encoder = NULL; int num_connectors = 0; bool is_lvds = false; - list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { - if (encoder->base.crtc != crtc) - continue; - + for_each_encoder_on_crtc(dev, crtc, encoder) { switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; @@ -4600,7 +4584,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; bool ok, has_reduced_clock = false, is_sdvo = false; bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; - struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder, *edp_encoder = NULL; const intel_limit_t *limit; int ret; @@ -4611,10 +4594,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, bool dither; bool is_cpu_edp = false, is_pch_edp = false; - list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { - if (encoder->base.crtc != crtc) - continue; - + for_each_encoder_on_crtc(dev, crtc, encoder) { switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 611080b32e40..95817c4edbe9 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -793,8 +793,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode 
*adjusted_mode) { struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_encoder *encoder; + struct intel_encoder *encoder; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int lane_count = 4; @@ -804,13 +803,9 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, /* * Find the lane count in the intel_encoder private */ - list_for_each_entry(encoder, &mode_config->encoder_list, head) { - struct intel_dp *intel_dp; + for_each_encoder_on_crtc(dev, crtc, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - if (encoder->crtc != crtc) - continue; - - intel_dp = enc_to_intel_dp(encoder); if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || intel_dp->base.type == INTEL_OUTPUT_EDP) { @@ -2404,16 +2399,11 @@ int intel_trans_dp_port_sel(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_encoder *encoder; + struct intel_encoder *encoder; - list_for_each_entry(encoder, &mode_config->encoder_list, head) { - struct intel_dp *intel_dp; - - if (encoder->crtc != crtc) - continue; + for_each_encoder_on_crtc(dev, crtc, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - intel_dp = enc_to_intel_dp(encoder); if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || intel_dp->base.type == INTEL_OUTPUT_EDP) return intel_dp->output_reg; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 05fcadbeac66..9b706a540d70 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -236,7 +236,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_lvds *intel_lvds = to_intel_lvds(encoder); - struct drm_encoder *tmp_encoder; + struct intel_encoder *tmp_encoder; u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; int pipe; @@ -247,8 +247,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, } /* Should never happen!! 
*/ - list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) { - if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) { + for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) { + if (&tmp_encoder->base != encoder) { DRM_ERROR("Can't enable LVDS and another " "encoder on the same pipe\n"); return false; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index a233a51fd7e6..3b413c9042c0 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -895,20 +895,16 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; - struct drm_mode_config *drm_config = &dev->mode_config; struct intel_tv *intel_tv = enc_to_intel_tv(encoder); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); - struct drm_encoder *other_encoder; + struct intel_encoder *other_encoder; if (!tv_mode) return false; - /* FIXME: lock encoder list */ - list_for_each_entry(other_encoder, &drm_config->encoder_list, head) { - if (other_encoder != encoder && - other_encoder->crtc == encoder->crtc) + for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder) + if (&other_encoder->base != encoder) return false; - } adjusted_mode->clock = tv_mode->clock; return true; -- cgit v1.2.3 From 4acf518626cdad5bbf7aac9869bd4accbbfb4ad3 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Wed, 4 Jul 2012 20:15:16 -0300 Subject: drm/i915: program FDI_RX TP and FDI delays This is required for a stable FDI connection. v2: fix and simplify the FDI_RX_MISC bits as noticed by Paulo Zanoni. CC: Paulo Zanoni Signed-off-by: Eugeni Dodonov Reviewed-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 3 +++ drivers/gpu/drm/i915/intel_ddi.c | 9 +++++++++ 2 files changed, 12 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index da7484ec3bfb..1218069c7f66 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3854,6 +3854,9 @@ #define _FDI_RXA_TUSIZE2 0xf0038 #define _FDI_RXB_TUSIZE1 0xf1030 #define _FDI_RXB_TUSIZE2 0xf1038 +#define FDI_RX_TP1_TO_TP2_48 (2<<20) +#define FDI_RX_TP1_TO_TP2_64 (3<<20) +#define FDI_RX_FDI_DELAY_90 (0x90<<0) #define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) #define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) #define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index f33fe1a1c33e..933c74859172 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -170,6 +170,15 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) udelay(600); + /* We need to program FDI_RX_MISC with the default TP1 to TP2 + * values before enabling the receiver, and configure the delay + * for the FDI timing generator to 90h. Luckily, all the other + * bits are supposed to be zeroed, so we can write those values + * directly. + */ + I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 | + FDI_RX_FDI_DELAY_90); + /* Enable CPU FDI Receiver with auto-training */ reg = FDI_RX_CTL(pipe); I915_WRITE(reg, -- cgit v1.2.3 From 49099c4991da3c94773f888aea2e9d27b8a7c6d1 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 6 Jul 2012 18:06:42 +0100 Subject: drm: fail gracefully when proc isn't setup. 
If drm can't find proc it should fail more gracefully than just oopsing. This tests whether drm_class is NULL, and sets it to NULL in the fail paths. Reported-by: Fengguang Wu Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_sysfs.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 45cf1dd3eb9c..45ac8d6c92b7 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -134,6 +134,7 @@ void drm_sysfs_destroy(void) return; class_remove_file(drm_class, &class_attr_version.attr); class_destroy(drm_class); + drm_class = NULL; } /** @@ -554,6 +555,9 @@ void drm_sysfs_device_remove(struct drm_minor *minor) int drm_class_device_register(struct device *dev) { + if (!drm_class || IS_ERR(drm_class)) + return -ENOENT; + dev->class = drm_class; return device_register(dev); } -- cgit v1.2.3 From 6b9d89b4365ab52bc26f8259122f422e93d87821 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 10 Jul 2012 11:15:23 +0100 Subject: drm: Add colouring to the range allocator In order to support snoopable memory on non-LLC architectures (so that we can bind vgem objects into the i915 GATT for example), we have to prevent the prefetcher on the GPU from crossing memory domains and so prevent allocation of a snoopable PTE immediately following an uncached PTE. To do that, we need to extend the range allocator with support for tracking and segregating different node colours. This will be used by i915 to segregate memory domains within the GTT. v2: Now with more drm_mm helpers and less driver interference. Signed-off-by: Chris Wilson Cc: Dave Airlie Cc: Ben Skeggs Cc: Jerome Glisse Cc: Alex Deucher Reviewed-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_gem.c | 2 +- drivers/gpu/drm/drm_mm.c | 169 +++++++++++++++++++++------------- drivers/gpu/drm/i915/i915_gem.c | 6 +- drivers/gpu/drm/i915/i915_gem_evict.c | 9 +- include/drm/drm_mm.h | 93 ++++++++++++++++--- 5 files changed, 191 insertions(+), 88 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index d58e69da1fb5..fbe0842038b5 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -354,7 +354,7 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj) /* Get a DRM GEM mmap offset allocated...
*/ list->file_offset_node = drm_mm_search_free(&mm->offset_manager, - obj->size / PAGE_SIZE, 0, 0); + obj->size / PAGE_SIZE, 0, false); if (!list->file_offset_node) { DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 961fb54f4266..9bb82f7f0061 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -118,45 +118,53 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) static void drm_mm_insert_helper(struct drm_mm_node *hole_node, struct drm_mm_node *node, - unsigned long size, unsigned alignment) + unsigned long size, unsigned alignment, + unsigned long color) { struct drm_mm *mm = hole_node->mm; - unsigned long tmp = 0, wasted = 0; unsigned long hole_start = drm_mm_hole_node_start(hole_node); unsigned long hole_end = drm_mm_hole_node_end(hole_node); + unsigned long adj_start = hole_start; + unsigned long adj_end = hole_end; BUG_ON(!hole_node->hole_follows || node->allocated); - if (alignment) - tmp = hole_start % alignment; + if (mm->color_adjust) + mm->color_adjust(hole_node, color, &adj_start, &adj_end); - if (!tmp) { + if (alignment) { + unsigned tmp = adj_start % alignment; + if (tmp) + adj_start += alignment - tmp; + } + + if (adj_start == hole_start) { hole_node->hole_follows = 0; - list_del_init(&hole_node->hole_stack); - } else - wasted = alignment - tmp; + list_del(&hole_node->hole_stack); + } - node->start = hole_start + wasted; + node->start = adj_start; node->size = size; node->mm = mm; + node->color = color; node->allocated = 1; INIT_LIST_HEAD(&node->hole_stack); list_add(&node->node_list, &hole_node->node_list); - BUG_ON(node->start + node->size > hole_end); + BUG_ON(node->start + node->size > adj_end); + node->hole_follows = 0; if (node->start + node->size < hole_end) { list_add(&node->hole_stack, &mm->hole_stack); node->hole_follows = 1; - } else { - node->hole_follows = 0; } } struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node, unsigned long size, unsigned alignment, + unsigned long color, int atomic) { struct drm_mm_node *node; @@ -165,7 +173,7 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node, if (unlikely(node == NULL)) return NULL; - drm_mm_insert_helper(hole_node, node, size, alignment); + drm_mm_insert_helper(hole_node, node, size, alignment, color); return node; } @@ -181,11 +189,11 @@ int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node, { struct drm_mm_node *hole_node; - hole_node = drm_mm_search_free(mm, size, alignment, 0); + hole_node = drm_mm_search_free(mm, size, alignment, false); if (!hole_node) return -ENOSPC; - drm_mm_insert_helper(hole_node, node, size, alignment); + drm_mm_insert_helper(hole_node, node, size, alignment, 0); return 0; } @@ -194,50 +202,57 @@ EXPORT_SYMBOL(drm_mm_insert_node); static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, struct drm_mm_node *node, unsigned long size, unsigned alignment, + unsigned long color, unsigned long start, unsigned long end) { struct drm_mm *mm = hole_node->mm; - unsigned long tmp = 0, wasted = 0; unsigned long hole_start = drm_mm_hole_node_start(hole_node); unsigned long hole_end = drm_mm_hole_node_end(hole_node); + unsigned long adj_start = hole_start; + unsigned long adj_end = hole_end; BUG_ON(!hole_node->hole_follows || node->allocated); - if (hole_start < start) - wasted += start - hole_start; - if (alignment) - tmp = (hole_start + wasted) % alignment; + if (mm->color_adjust) + 
mm->color_adjust(hole_node, color, &adj_start, &adj_end); - if (tmp) - wasted += alignment - tmp; + if (adj_start < start) + adj_start = start; + + if (alignment) { + unsigned tmp = adj_start % alignment; + if (tmp) + adj_start += alignment - tmp; + } - if (!wasted) { + if (adj_start == hole_start) { hole_node->hole_follows = 0; - list_del_init(&hole_node->hole_stack); + list_del(&hole_node->hole_stack); } - node->start = hole_start + wasted; + node->start = adj_start; node->size = size; node->mm = mm; + node->color = color; node->allocated = 1; INIT_LIST_HEAD(&node->hole_stack); list_add(&node->node_list, &hole_node->node_list); - BUG_ON(node->start + node->size > hole_end); + BUG_ON(node->start + node->size > adj_end); BUG_ON(node->start + node->size > end); + node->hole_follows = 0; if (node->start + node->size < hole_end) { list_add(&node->hole_stack, &mm->hole_stack); node->hole_follows = 1; - } else { - node->hole_follows = 0; } } struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node, unsigned long size, unsigned alignment, + unsigned long color, unsigned long start, unsigned long end, int atomic) @@ -248,7 +263,7 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node if (unlikely(node == NULL)) return NULL; - drm_mm_insert_helper_range(hole_node, node, size, alignment, + drm_mm_insert_helper_range(hole_node, node, size, alignment, color, start, end); return node; @@ -267,11 +282,11 @@ int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node, struct drm_mm_node *hole_node; hole_node = drm_mm_search_free_in_range(mm, size, alignment, - start, end, 0); + start, end, false); if (!hole_node) return -ENOSPC; - drm_mm_insert_helper_range(hole_node, node, size, alignment, + drm_mm_insert_helper_range(hole_node, node, size, alignment, 0, start, end); return 0; @@ -336,27 +351,23 @@ EXPORT_SYMBOL(drm_mm_put_block); static int check_free_hole(unsigned long start, unsigned long end, unsigned long size, unsigned alignment) { - unsigned wasted = 0; - if (end - start < size) return 0; if (alignment) { unsigned tmp = start % alignment; if (tmp) - wasted = alignment - tmp; - } - - if (end >= start + size + wasted) { - return 1; + start += alignment - tmp; } - return 0; + return end >= start + size; } -struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, - unsigned long size, - unsigned alignment, int best_match) +struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, + unsigned long size, + unsigned alignment, + unsigned long color, + bool best_match) { struct drm_mm_node *entry; struct drm_mm_node *best; @@ -368,10 +379,17 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, best_size = ~0UL; list_for_each_entry(entry, &mm->hole_stack, hole_stack) { + unsigned long adj_start = drm_mm_hole_node_start(entry); + unsigned long adj_end = drm_mm_hole_node_end(entry); + + if (mm->color_adjust) { + mm->color_adjust(entry, color, &adj_start, &adj_end); + if (adj_end <= adj_start) + continue; + } + BUG_ON(!entry->hole_follows); - if (!check_free_hole(drm_mm_hole_node_start(entry), - drm_mm_hole_node_end(entry), - size, alignment)) + if (!check_free_hole(adj_start, adj_end, size, alignment)) continue; if (!best_match) @@ -385,14 +403,15 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, return best; } -EXPORT_SYMBOL(drm_mm_search_free); - -struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, - unsigned long size, - unsigned alignment, - unsigned long start, - 
unsigned long end, - int best_match) +EXPORT_SYMBOL(drm_mm_search_free_generic); + +struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, + unsigned long size, + unsigned alignment, + unsigned long color, + unsigned long start, + unsigned long end, + bool best_match) { struct drm_mm_node *entry; struct drm_mm_node *best; @@ -410,6 +429,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, end : drm_mm_hole_node_end(entry); BUG_ON(!entry->hole_follows); + + if (mm->color_adjust) { + mm->color_adjust(entry, color, &adj_start, &adj_end); + if (adj_end <= adj_start) + continue; + } + if (!check_free_hole(adj_start, adj_end, size, alignment)) continue; @@ -424,7 +450,7 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, return best; } -EXPORT_SYMBOL(drm_mm_search_free_in_range); +EXPORT_SYMBOL(drm_mm_search_free_in_range_generic); /** * Moves an allocation. To be used with embedded struct drm_mm_node. @@ -437,6 +463,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) new->mm = old->mm; new->start = old->start; new->size = old->size; + new->color = old->color; old->allocated = 0; new->allocated = 1; @@ -452,9 +479,12 @@ EXPORT_SYMBOL(drm_mm_replace_node); * Warning: As long as the scan list is non-empty, no other operations than * adding/removing nodes to/from the scan list are allowed. */ -void drm_mm_init_scan(struct drm_mm *mm, unsigned long size, - unsigned alignment) +void drm_mm_init_scan(struct drm_mm *mm, + unsigned long size, + unsigned alignment, + unsigned long color) { + mm->scan_color = color; mm->scan_alignment = alignment; mm->scan_size = size; mm->scanned_blocks = 0; @@ -474,11 +504,14 @@ EXPORT_SYMBOL(drm_mm_init_scan); * Warning: As long as the scan list is non-empty, no other operations than * adding/removing nodes to/from the scan list are allowed. */ -void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size, +void drm_mm_init_scan_with_range(struct drm_mm *mm, + unsigned long size, unsigned alignment, + unsigned long color, unsigned long start, unsigned long end) { + mm->scan_color = color; mm->scan_alignment = alignment; mm->scan_size = size; mm->scanned_blocks = 0; @@ -522,17 +555,21 @@ int drm_mm_scan_add_block(struct drm_mm_node *node) hole_start = drm_mm_hole_node_start(prev_node); hole_end = drm_mm_hole_node_end(prev_node); + + adj_start = hole_start; + adj_end = hole_end; + + if (mm->color_adjust) + mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end); + if (mm->scan_check_range) { - adj_start = hole_start < mm->scan_start ? - mm->scan_start : hole_start; - adj_end = hole_end > mm->scan_end ? 
- mm->scan_end : hole_end; - } else { - adj_start = hole_start; - adj_end = hole_end; + if (adj_start < mm->scan_start) + adj_start = mm->scan_start; + if (adj_end > mm->scan_end) + adj_end = mm->scan_end; } - if (check_free_hole(adj_start , adj_end, + if (check_free_hole(adj_start, adj_end, mm->scan_size, mm->scan_alignment)) { mm->scan_hit_start = hole_start; mm->scan_hit_size = hole_end; @@ -616,6 +653,8 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) mm->head_node.size = start - mm->head_node.start; list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack); + mm->color_adjust = NULL; + return 0; } EXPORT_SYMBOL(drm_mm_init); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2b54142a46ed..0fdb3d29cbbb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2748,8 +2748,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, if (map_and_fenceable) free_space = drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, - size, alignment, 0, - dev_priv->mm.gtt_mappable_end, + size, alignment, + 0, dev_priv->mm.gtt_mappable_end, 0); else free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, @@ -2760,7 +2760,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, obj->gtt_space = drm_mm_get_block_range_generic(free_space, size, alignment, 0, - dev_priv->mm.gtt_mappable_end, + 0, dev_priv->mm.gtt_mappable_end, 0); else obj->gtt_space = diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index ae7c24e12e52..eba0308f10e3 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -78,11 +78,12 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, INIT_LIST_HEAD(&unwind_list); if (mappable) - drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size, - alignment, 0, - dev_priv->mm.gtt_mappable_end); + drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, + min_size, alignment, 0, + 0, dev_priv->mm.gtt_mappable_end); else - drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); + drm_mm_init_scan(&dev_priv->mm.gtt_space, + min_size, alignment, 0); /* First see if there is a large enough contiguous idle region... 
*/ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 564b14aa7e16..06d7f798a08c 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -50,6 +50,7 @@ struct drm_mm_node { unsigned scanned_next_free : 1; unsigned scanned_preceeds_hole : 1; unsigned allocated : 1; + unsigned long color; unsigned long start; unsigned long size; struct drm_mm *mm; @@ -66,6 +67,7 @@ struct drm_mm { spinlock_t unused_lock; unsigned int scan_check_range : 1; unsigned scan_alignment; + unsigned long scan_color; unsigned long scan_size; unsigned long scan_hit_start; unsigned scan_hit_size; @@ -73,6 +75,9 @@ struct drm_mm { unsigned long scan_start; unsigned long scan_end; struct drm_mm_node *prev_scanned_node; + + void (*color_adjust)(struct drm_mm_node *node, unsigned long color, + unsigned long *start, unsigned long *end); }; static inline bool drm_mm_node_allocated(struct drm_mm_node *node) @@ -100,11 +105,13 @@ static inline bool drm_mm_initialized(struct drm_mm *mm) extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, unsigned long size, unsigned alignment, + unsigned long color, int atomic); extern struct drm_mm_node *drm_mm_get_block_range_generic( struct drm_mm_node *node, unsigned long size, unsigned alignment, + unsigned long color, unsigned long start, unsigned long end, int atomic); @@ -112,13 +119,13 @@ static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent, unsigned long size, unsigned alignment) { - return drm_mm_get_block_generic(parent, size, alignment, 0); + return drm_mm_get_block_generic(parent, size, alignment, 0, 0); } static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent, unsigned long size, unsigned alignment) { - return drm_mm_get_block_generic(parent, size, alignment, 1); + return drm_mm_get_block_generic(parent, size, alignment, 0, 1); } static inline struct drm_mm_node *drm_mm_get_block_range( struct drm_mm_node *parent, @@ -127,8 +134,19 @@ static inline struct drm_mm_node *drm_mm_get_block_range( unsigned long start, unsigned long end) { - return drm_mm_get_block_range_generic(parent, size, alignment, - start, end, 0); + return drm_mm_get_block_range_generic(parent, size, alignment, 0, + start, end, 0); +} +static inline struct drm_mm_node *drm_mm_get_color_block_range( + struct drm_mm_node *parent, + unsigned long size, + unsigned alignment, + unsigned long color, + unsigned long start, + unsigned long end) +{ + return drm_mm_get_block_range_generic(parent, size, alignment, color, + start, end, 0); } static inline struct drm_mm_node *drm_mm_get_block_atomic_range( struct drm_mm_node *parent, @@ -137,7 +155,7 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range( unsigned long start, unsigned long end) { - return drm_mm_get_block_range_generic(parent, size, alignment, + return drm_mm_get_block_range_generic(parent, size, alignment, 0, start, end, 1); } extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node, @@ -149,18 +167,59 @@ extern int drm_mm_insert_node_in_range(struct drm_mm *mm, extern void drm_mm_put_block(struct drm_mm_node *cur); extern void drm_mm_remove_node(struct drm_mm_node *node); extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); -extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, - unsigned long size, - unsigned alignment, - int best_match); -extern struct drm_mm_node *drm_mm_search_free_in_range( +extern struct 
drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, + unsigned long size, + unsigned alignment, + unsigned long color, + bool best_match); +extern struct drm_mm_node *drm_mm_search_free_in_range_generic( + const struct drm_mm *mm, + unsigned long size, + unsigned alignment, + unsigned long color, + unsigned long start, + unsigned long end, + bool best_match); +static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, + unsigned long size, + unsigned alignment, + bool best_match) +{ + return drm_mm_search_free_generic(mm,size, alignment, 0, best_match); +} +static inline struct drm_mm_node *drm_mm_search_free_in_range( const struct drm_mm *mm, unsigned long size, unsigned alignment, unsigned long start, unsigned long end, - int best_match); -extern int drm_mm_init(struct drm_mm *mm, unsigned long start, + bool best_match) +{ + return drm_mm_search_free_in_range_generic(mm, size, alignment, 0, + start, end, best_match); +} +static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm, + unsigned long size, + unsigned alignment, + unsigned long color, + bool best_match) +{ + return drm_mm_search_free_generic(mm,size, alignment, color, best_match); +} +static inline struct drm_mm_node *drm_mm_search_free_in_range_color( + const struct drm_mm *mm, + unsigned long size, + unsigned alignment, + unsigned long color, + unsigned long start, + unsigned long end, + bool best_match) +{ + return drm_mm_search_free_in_range_generic(mm, size, alignment, color, + start, end, best_match); +} +extern int drm_mm_init(struct drm_mm *mm, + unsigned long start, unsigned long size); extern void drm_mm_takedown(struct drm_mm *mm); extern int drm_mm_clean(struct drm_mm *mm); @@ -171,10 +230,14 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) return block->mm; } -void drm_mm_init_scan(struct drm_mm *mm, unsigned long size, - unsigned alignment); -void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size, +void drm_mm_init_scan(struct drm_mm *mm, + unsigned long size, + unsigned alignment, + unsigned long color); +void drm_mm_init_scan_with_range(struct drm_mm *mm, + unsigned long size, unsigned alignment, + unsigned long color, unsigned long start, unsigned long end); int drm_mm_scan_add_block(struct drm_mm_node *node); -- cgit v1.2.3 From 7ecc45e3ef8468abb062be2a8bb427029342f42d Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 29 Jun 2012 11:33:12 +0200 Subject: drm/radeon: add error handling to fence_wait_empty_locked MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of returning the error handle it directly and while at it fix the comments about the ring lock. 
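The shape of the fix (shared with the radeon_vm_unbind_locked change that follows) is a drop-lock / reset / retry loop; roughly, as a schematic sketch rather than the exact hunk:

	for (;;) {
		int r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
		if (r == -EDEADLK) {
			/* likely a lockup: drop the ring lock so the reset
			 * path can take it, reset the GPU, then retry */
			mutex_unlock(&rdev->ring_lock);
			r = radeon_gpu_reset(rdev);
			mutex_lock(&rdev->ring_lock);
			if (!r)
				continue;
		}
		if (r)
			dev_err(rdev->dev, "error waiting for ring to become idle (%d)\n", r);
		break;
	}

The key point is that the helper no longer hands the failure back to its caller; it either gets the ring idle or logs the error and gives up.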
Signed-off-by: Christian König Reviewed-by: Michel Dänzer Reviewed-by: Jerome Glisse Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 2 +- drivers/gpu/drm/radeon/radeon_fence.c | 33 +++++++++++++++++++++------------ 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 77b4519b19b8..5861ec86725f 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -239,7 +239,7 @@ void radeon_fence_process(struct radeon_device *rdev, int ring); bool radeon_fence_signaled(struct radeon_fence *fence); int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring); -int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); +void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); int radeon_fence_wait_any(struct radeon_device *rdev, struct radeon_fence **fences, bool intr); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 7b55625a5e18..be4e4f390e89 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -440,14 +440,11 @@ int radeon_fence_wait_any(struct radeon_device *rdev, return 0; } +/* caller must hold ring lock */ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) { uint64_t seq; - /* We are not protected by ring lock when reading current seq but - * it's ok as worst case is we return to early while we could have - * wait. - */ seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; if (seq >= rdev->fence_drv[ring].sync_seq[ring]) { /* nothing to wait for, last_seq is @@ -457,15 +454,27 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) return radeon_fence_wait_seq(rdev, seq, ring, false, false); } -int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) +/* caller must hold ring lock */ +void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) { - /* We are not protected by ring lock when reading current seq - * but it's ok as wait empty is call from place where no more - * activity can be scheduled so there won't be concurrent access - * to seq value. - */ - return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].sync_seq[ring], - ring, false, false); + uint64_t seq = rdev->fence_drv[ring].sync_seq[ring]; + + while(1) { + int r; + r = radeon_fence_wait_seq(rdev, seq, ring, false, false); + if (r == -EDEADLK) { + mutex_unlock(&rdev->ring_lock); + r = radeon_gpu_reset(rdev); + mutex_lock(&rdev->ring_lock); + if (!r) + continue; + } + if (r) { + dev_err(rdev->dev, "error waiting for ring to become" + " idle (%d)\n", r); + } + return; + } } struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) -- cgit v1.2.3 From 35e56bd0a42f06b215741fb94bc8d1e42e946449 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 25 Jun 2012 15:13:50 +0200 Subject: drm/radeon: add error handling to radeon_vm_unbind_locked MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Waiting for a fence can fail for different reasons, the most common is a deadlock. 
Signed-off-by: Christian König Reviewed-by: Michel Dänzer Reviewed-by: Jerome Glisse Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_gart.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 2b34c1a91421..ee11c5073726 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -316,10 +316,21 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev, } /* wait for vm use to end */ - if (vm->fence) { - radeon_fence_wait(vm->fence, false); - radeon_fence_unref(&vm->fence); + while (vm->fence) { + int r; + r = radeon_fence_wait(vm->fence, false); + if (r) + DRM_ERROR("error while waiting for fence: %d\n", r); + if (r == -EDEADLK) { + mutex_unlock(&rdev->vm_manager.lock); + r = radeon_gpu_reset(rdev); + mutex_lock(&rdev->vm_manager.lock); + if (!r) + continue; + } + break; } + radeon_fence_unref(&vm->fence); /* hw unbind */ rdev->vm_manager.funcs->unbind(rdev, vm); -- cgit v1.2.3 From 93bf888c5c730605e3470f5d2381f296eda88d79 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 3 Jul 2012 14:05:41 +0200 Subject: drm/radeon: fix fence related segfault in CS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't return success if scheduling the IB fails; otherwise we end up with an oops in ttm_eu_fence_buffer_objects. Signed-off-by: Christian König Reviewed-by: Jerome Glisse Reviewed-by: Michel Dänzer Reviewed-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/radeon_cs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index f1b75275f594..d5aec095d358 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -358,7 +358,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, if (r) { DRM_ERROR("Failed to schedule IB !\n"); } - return 0; + return r; } static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser, -- cgit v1.2.3 From dee53e7fb3ee01b83c75a8e8df8c444049d38058 Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Mon, 2 Jul 2012 12:45:19 -0400 Subject: drm/radeon: add an exclusive lock for GPU reset v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GPU reset needs to be exclusive, one happening at a time. For this, add a rw semaphore so that any path that triggers GPU activity has to take the semaphore as a reader, thus allowing concurrency. The GPU reset path takes the semaphore as a writer, ensuring that no concurrent reset takes place.
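In outline this is the classic reader/writer split: every path that submits work to the GPU takes the new semaphore as a reader, so normal submissions still run concurrently with each other, while the reset path takes it as a writer and therefore waits for all in-flight submissions to drain before the reset and blocks new ones during it. As a schematic sketch of the shape only (the real hunks below spread this over the CS ioctl, the GEM ioctls and radeon_gpu_reset()):

	/* device init */
	init_rwsem(&rdev->exclusive_lock);

	/* any path that triggers GPU activity */
	down_read(&rdev->exclusive_lock);
	/* ... validate buffers, emit the command stream ... */
	up_read(&rdev->exclusive_lock);

	/* GPU reset, exclusive against all of the above */
	down_write(&rdev->exclusive_lock);
	/* ... save state, reset the ASIC, restore state ... */
	up_write(&rdev->exclusive_lock);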
v2: init rw semaphore Signed-off-by: Jerome Glisse Reviewed-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_cs.c | 5 +++++ drivers/gpu/drm/radeon/radeon_device.c | 3 +++ drivers/gpu/drm/radeon/radeon_gem.c | 8 ++++++++ 4 files changed, 17 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5861ec86725f..44878738bcde 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1446,6 +1446,7 @@ struct radeon_device { struct device *dev; struct drm_device *ddev; struct pci_dev *pdev; + struct rw_semaphore exclusive_lock; /* ASIC */ union radeon_asic_config config; enum radeon_family family; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index d5aec095d358..553da67a4cdd 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -499,7 +499,9 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct radeon_cs_parser parser; int r; + down_read(&rdev->exclusive_lock); if (!rdev->accel_working) { + up_read(&rdev->exclusive_lock); return -EBUSY; } /* initialize parser */ @@ -512,6 +514,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r) { DRM_ERROR("Failed to initialize parser !\n"); radeon_cs_parser_fini(&parser, r); + up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; } @@ -520,6 +523,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r != -ERESTARTSYS) DRM_ERROR("Failed to parse relocation %d!\n", r); radeon_cs_parser_fini(&parser, r); + up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; } @@ -533,6 +537,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) } out: radeon_cs_parser_fini(&parser, r); + up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index f654ba80e96e..254fdb4f73c9 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -734,6 +734,7 @@ int radeon_device_init(struct radeon_device *rdev, mutex_init(&rdev->gem.mutex); mutex_init(&rdev->pm.mutex); init_rwsem(&rdev->pm.mclk_lock); + init_rwsem(&rdev->exclusive_lock); init_waitqueue_head(&rdev->irq.vblank_queue); init_waitqueue_head(&rdev->irq.idle_queue); r = radeon_gem_init(rdev); @@ -988,6 +989,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) int r; int resched; + down_write(&rdev->exclusive_lock); radeon_save_bios_scratch_regs(rdev); /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); @@ -1007,6 +1009,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) dev_info(rdev->dev, "GPU reset failed\n"); } + up_write(&rdev->exclusive_lock); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d9b0809be4e4..b0be9c4f4c91 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -215,12 +215,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, uint32_t handle; int r; + down_read(&rdev->exclusive_lock); /* create a gem object to contain this object in */ args->size = roundup(args->size, PAGE_SIZE); r = radeon_gem_object_create(rdev, args->size, args->alignment, args->initial_domain, false, false, &gobj); if (r) { + 
up_read(&rdev->exclusive_lock); r = radeon_gem_handle_lockup(rdev, r); return r; } @@ -228,10 +230,12 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference_unlocked(gobj); if (r) { + up_read(&rdev->exclusive_lock); r = radeon_gem_handle_lockup(rdev, r); return r; } args->handle = handle; + up_read(&rdev->exclusive_lock); return 0; } @@ -240,6 +244,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, { /* transition the BO to a domain - * just validate the BO into a certain domain */ + struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_set_domain *args = data; struct drm_gem_object *gobj; struct radeon_bo *robj; @@ -247,10 +252,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, /* for now if someone requests domain CPU - * just make sure the buffer is finished with */ + down_read(&rdev->exclusive_lock); /* just do a BO wait for now */ gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) { + up_read(&rdev->exclusive_lock); return -ENOENT; } robj = gem_to_radeon_bo(gobj); @@ -258,6 +265,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); drm_gem_object_unreference_unlocked(gobj); + up_read(&rdev->exclusive_lock); r = radeon_gem_handle_lockup(robj->rdev, r); return r; } -- cgit v1.2.3 From 07a713305a2b55d6ec424000f2c5bed1268ac56b Mon Sep 17 00:00:00 2001 From: Christian König Date: Sat, 7 Jul 2012 12:11:32 +0200 Subject: drm/radeon: fix ring commit padding MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't need to pad anything if the number of dwords written to the ring already matches the requirements. Fixes some "writting more dword to ring than expected" warnings. Signed-off-by: Christian König Reviewed-by: Jerome Glisse Reviewed-by: Michel Dänzer Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_ring.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 0826e77f99ae..674aabac2212 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -272,13 +272,8 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) { - unsigned count_dw_pad; - unsigned i; - /* We pad to match fetch size */ - count_dw_pad = (ring->align_mask + 1) - - (ring->wptr & ring->align_mask); - for (i = 0; i < count_dw_pad; i++) { + while (ring->wptr & ring->align_mask) { radeon_ring_write(ring, ring->nop); } DRM_MEMORYBARRIER(); -- cgit v1.2.3 From bf66625e02ac8e4535627394bcfb237bbf8a157f Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 9 Jul 2012 10:52:39 +0200 Subject: drm/radeon: fix fence value access MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is possible that radeon_fence_process is called after writeback is disabled for suspend, leading to an invalid read of register 0x0. This fixes a problem for me where the fence value is temporary incremented by 0x100000000 on suspend/resume. 
Signed-off-by: Christian König Reviewed-by: Jerome Glisse Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_fence.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index be4e4f390e89..a194a142f898 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -42,21 +42,23 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) { - if (rdev->wb.enabled) { - *rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq); + struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; + if (likely(rdev->wb.enabled || !drv->scratch_reg)) { + *drv->cpu_addr = cpu_to_le32(seq); } else { - WREG32(rdev->fence_drv[ring].scratch_reg, seq); + WREG32(drv->scratch_reg, seq); } } static u32 radeon_fence_read(struct radeon_device *rdev, int ring) { + struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; u32 seq = 0; - if (rdev->wb.enabled) { - seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr); + if (likely(rdev->wb.enabled || !drv->scratch_reg)) { + seq = le32_to_cpu(*drv->cpu_addr); } else { - seq = RREG32(rdev->fence_drv[ring].scratch_reg); + seq = RREG32(drv->scratch_reg); } return seq; } -- cgit v1.2.3 From 31be6183d53e89ff95ec7cb70aac67fa5588e0df Mon Sep 17 00:00:00 2001 From: Christian König Date: Sat, 7 Jul 2012 13:10:39 +0200 Subject: drm/radeon: fix fence init after resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Start with last signaled fence number instead of last emitted one. Signed-off-by: Christian König Reviewed-by: Jerome Glisse Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_fence.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index a194a142f898..76c5b22bfd22 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -578,7 +578,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) } rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index; - radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring); + radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring); rdev->fence_drv[ring].initialized = true; dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n", ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr); -- cgit v1.2.3 From d40fd3a3372945b35e22ea93a84277c23216747a Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 5 Jul 2012 13:33:41 +0200 Subject: drm/radeon: remove FIXME comment from chipset suspend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For a normal suspend/resume we already wait for the rings to be empty, and for a suspend/resume in case of a lockup we REALLY don't want to wait for anything.
Signed-off-by: Christian König Reviewed-by: Jerome Glisse Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/evergreen.c | 1 - drivers/gpu/drm/radeon/ni.c | 1 - drivers/gpu/drm/radeon/r600.c | 1 - drivers/gpu/drm/radeon/rv770.c | 1 - drivers/gpu/drm/radeon/si.c | 1 - 5 files changed, 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f716e081c813..eb9a71a7a646 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -3137,7 +3137,6 @@ int evergreen_suspend(struct radeon_device *rdev) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r600_audio_fini(rdev); - /* FIXME: we should wait for ring to be empty */ radeon_ib_pool_suspend(rdev); r600_blit_suspend(rdev); r700_cp_stop(rdev); diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 2366be3df074..32a608270d7e 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1334,7 +1334,6 @@ int cayman_resume(struct radeon_device *rdev) int cayman_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); - /* FIXME: we should wait for ring to be empty */ radeon_ib_pool_suspend(rdev); radeon_vm_manager_suspend(rdev); r600_blit_suspend(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 43d0c41922a5..de4de2dac160 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2461,7 +2461,6 @@ int r600_suspend(struct radeon_device *rdev) r600_audio_fini(rdev); radeon_ib_pool_suspend(rdev); r600_blit_suspend(rdev); - /* FIXME: we should wait for ring to be empty */ r600_cp_stop(rdev); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; r600_irq_suspend(rdev); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index b4f51c569c36..7e230f62f0fa 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -996,7 +996,6 @@ int rv770_suspend(struct radeon_device *rdev) r600_audio_fini(rdev); radeon_ib_pool_suspend(rdev); r600_blit_suspend(rdev); - /* FIXME: we should wait for ring to be empty */ r700_cp_stop(rdev); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; r600_irq_suspend(rdev); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 34603b3c80ab..78c790fec63f 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3807,7 +3807,6 @@ int si_resume(struct radeon_device *rdev) int si_suspend(struct radeon_device *rdev) { - /* FIXME: we should wait for ring to be empty */ radeon_ib_pool_suspend(rdev); radeon_vm_manager_suspend(rdev); #if 0 -- cgit v1.2.3 From b90ca986a41b233c2fcc8c809ee4f970d2de075e Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 4 Jul 2012 21:36:53 +0200 Subject: drm/radeon: make cp init on cayman more robust MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's not critical, but the current code isn't 100% correct. 
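The cleanup that follows replaces three copy-pasted per-ring setup blocks with small per-ring tables of register offsets walked in one loop, so every ring is guaranteed to get exactly the same programming sequence. The general shape, as an illustrative sketch with the details elided (compute_rb_cntl() is an invented stand-in, not part of the patch):

	static const int ridx[] = {
		RADEON_RING_TYPE_GFX_INDEX,
		CAYMAN_RING_TYPE_CP1_INDEX,
		CAYMAN_RING_TYPE_CP2_INDEX
	};
	static const unsigned cp_rb_cntl[] = { CP_RB0_CNTL, CP_RB1_CNTL, CP_RB2_CNTL };
	int i;

	for (i = 0; i < 3; ++i) {
		struct radeon_ring *ring = &rdev->ring[ridx[i]];
		/* identical per-ring programming, driven by the tables */
		WREG32(cp_rb_cntl[i], compute_rb_cntl(ring));
	}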
Signed-off-by: Christian König Reviewed-by: Jerome Glisse Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/ni.c | 133 +++++++++++++++++++------------------------- 1 file changed, 56 insertions(+), 77 deletions(-) diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 32a608270d7e..8b1df33c7be9 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -987,10 +987,33 @@ static void cayman_cp_fini(struct radeon_device *rdev) int cayman_cp_resume(struct radeon_device *rdev) { + static const int ridx[] = { + RADEON_RING_TYPE_GFX_INDEX, + CAYMAN_RING_TYPE_CP1_INDEX, + CAYMAN_RING_TYPE_CP2_INDEX + }; + static const unsigned cp_rb_cntl[] = { + CP_RB0_CNTL, + CP_RB1_CNTL, + CP_RB2_CNTL, + }; + static const unsigned cp_rb_rptr_addr[] = { + CP_RB0_RPTR_ADDR, + CP_RB1_RPTR_ADDR, + CP_RB2_RPTR_ADDR + }; + static const unsigned cp_rb_rptr_addr_hi[] = { + CP_RB0_RPTR_ADDR_HI, + CP_RB1_RPTR_ADDR_HI, + CP_RB2_RPTR_ADDR_HI + }; + static const unsigned cp_rb_base[] = { + CP_RB0_BASE, + CP_RB1_BASE, + CP_RB2_BASE + }; struct radeon_ring *ring; - u32 tmp; - u32 rb_bufsz; - int r; + int i, r; /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP | @@ -1012,91 +1035,47 @@ int cayman_cp_resume(struct radeon_device *rdev) WREG32(CP_DEBUG, (1 << 27)); - /* ring 0 - compute and gfx */ - /* Set ring buffer size */ - ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; -#ifdef __BIG_ENDIAN - tmp |= BUF_SWAP_32BIT; -#endif - WREG32(CP_RB0_CNTL, tmp); - - /* Initialize the ring buffer's read and write pointers */ - WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); - ring->wptr = 0; - WREG32(CP_RB0_WPTR, ring->wptr); - /* set the wb address wether it's enabled or not */ - WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); - WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); + WREG32(SCRATCH_UMSK, 0xff); - if (rdev->wb.enabled) - WREG32(SCRATCH_UMSK, 0xff); - else { - tmp |= RB_NO_UPDATE; - WREG32(SCRATCH_UMSK, 0); - } - - mdelay(1); - WREG32(CP_RB0_CNTL, tmp); - - WREG32(CP_RB0_BASE, ring->gpu_addr >> 8); - - ring->rptr = RREG32(CP_RB0_RPTR); + for (i = 0; i < 3; ++i) { + uint32_t rb_cntl; + uint64_t addr; - /* ring1 - compute only */ - /* Set ring buffer size */ - ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + /* Set ring buffer size */ + ring = &rdev->ring[ridx[i]]; + rb_cntl = drm_order(ring->ring_size / 8); + rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8; #ifdef __BIG_ENDIAN - tmp |= BUF_SWAP_32BIT; + rb_cntl |= BUF_SWAP_32BIT; #endif - WREG32(CP_RB1_CNTL, tmp); + WREG32(cp_rb_cntl[i], rb_cntl); - /* Initialize the ring buffer's read and write pointers */ - WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); - ring->wptr = 0; - WREG32(CP_RB1_WPTR, ring->wptr); - - /* set the wb address wether it's enabled or not */ - WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); - WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF); - - mdelay(1); - WREG32(CP_RB1_CNTL, tmp); - - WREG32(CP_RB1_BASE, ring->gpu_addr >> 8); - - ring->rptr = RREG32(CP_RB1_RPTR); - - /* ring2 - compute only 
*/ - /* Set ring buffer size */ - ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; - rb_bufsz = drm_order(ring->ring_size / 8); - tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; -#ifdef __BIG_ENDIAN - tmp |= BUF_SWAP_32BIT; -#endif - WREG32(CP_RB2_CNTL, tmp); - - /* Initialize the ring buffer's read and write pointers */ - WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); - ring->wptr = 0; - WREG32(CP_RB2_WPTR, ring->wptr); + /* set the wb address wether it's enabled or not */ + addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET; + WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC); + WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF); + } - /* set the wb address wether it's enabled or not */ - WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); - WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF); + /* set the rb base addr, this causes an internal reset of ALL rings */ + for (i = 0; i < 3; ++i) { + ring = &rdev->ring[ridx[i]]; + WREG32(cp_rb_base[i], ring->gpu_addr >> 8); + } - mdelay(1); - WREG32(CP_RB2_CNTL, tmp); + for (i = 0; i < 3; ++i) { + /* Initialize the ring buffer's read and write pointers */ + ring = &rdev->ring[ridx[i]]; + WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA); - WREG32(CP_RB2_BASE, ring->gpu_addr >> 8); + ring->rptr = ring->wptr = 0; + WREG32(ring->rptr_reg, ring->rptr); + WREG32(ring->wptr_reg, ring->wptr); - ring->rptr = RREG32(CP_RB2_RPTR); + mdelay(1); + WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA); + } /* start the rings */ cayman_cp_start(rdev); -- cgit v1.2.3 From 2898c348e5caa50b31b9113726dfa45b2dfc3124 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 5 Jul 2012 11:55:34 +0200 Subject: drm/radeon: remove ip_pool start/suspend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The IB pool is in gart memory, so it is completely superfluous to unpin / repin it on suspend / resume. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/evergreen.c | 17 ++++++----------- drivers/gpu/drm/radeon/ni.c | 16 ++++++---------- drivers/gpu/drm/radeon/r100.c | 23 ++++++----------------- drivers/gpu/drm/radeon/r300.c | 17 ++++++----------- drivers/gpu/drm/radeon/r420.c | 17 ++++++----------- drivers/gpu/drm/radeon/r520.c | 14 +++++--------- drivers/gpu/drm/radeon/r600.c | 17 ++++++----------- drivers/gpu/drm/radeon/radeon.h | 2 -- drivers/gpu/drm/radeon/radeon_asic.h | 1 - drivers/gpu/drm/radeon/radeon_ring.c | 17 +++++++---------- drivers/gpu/drm/radeon/rs400.c | 17 ++++++----------- drivers/gpu/drm/radeon/rs600.c | 17 ++++++----------- drivers/gpu/drm/radeon/rs690.c | 17 ++++++----------- drivers/gpu/drm/radeon/rv515.c | 16 ++++++---------- drivers/gpu/drm/radeon/rv770.c | 17 ++++++----------- drivers/gpu/drm/radeon/si.c | 16 ++++++---------- 16 files changed, 84 insertions(+), 157 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index eb9a71a7a646..64e06e67facd 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -3087,9 +3087,11 @@ static int evergreen_startup(struct radeon_device *rdev) if (r) return r; - r = radeon_ib_pool_start(rdev); - if (r) + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; + } r = radeon_ib_ring_tests(rdev); if (r) @@ -3137,7 +3139,6 @@ int evergreen_suspend(struct radeon_device *rdev) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r600_audio_fini(rdev); - radeon_ib_pool_suspend(rdev); r600_blit_suspend(rdev); r700_cp_stop(rdev); ring->ready = false; @@ -3224,20 +3225,14 @@ int evergreen_init(struct radeon_device *rdev) if (r) return r; - r = radeon_ib_pool_init(rdev); rdev->accel_working = true; - if (r) { - dev_err(rdev->dev, "IB initialization failed (%d).\n", r); - rdev->accel_working = false; - } - r = evergreen_startup(rdev); if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); r700_cp_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); evergreen_pcie_gart_fini(rdev); rdev->accel_working = false; @@ -3264,7 +3259,7 @@ void evergreen_fini(struct radeon_device *rdev) r700_cp_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); evergreen_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 8b1df33c7be9..fe553102df82 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1270,9 +1270,11 @@ static int cayman_startup(struct radeon_device *rdev) if (r) return r; - r = radeon_ib_pool_start(rdev); - if (r) + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; + } r = radeon_ib_ring_tests(rdev); if (r) @@ -1313,7 +1315,6 @@ int cayman_resume(struct radeon_device *rdev) int cayman_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); - radeon_ib_pool_suspend(rdev); radeon_vm_manager_suspend(rdev); r600_blit_suspend(rdev); cayman_cp_enable(rdev, false); @@ -1391,12 +1392,7 @@ int cayman_init(struct radeon_device *rdev) if (r) return r; - r = radeon_ib_pool_init(rdev); rdev->accel_working = true; - if (r) { - dev_err(rdev->dev, "IB initialization failed (%d).\n", r); - rdev->accel_working = false; - } r = 
radeon_vm_manager_init(rdev); if (r) { dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); @@ -1410,7 +1406,7 @@ int cayman_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_IGP) si_rlc_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_vm_manager_fini(rdev); radeon_irq_kms_fini(rdev); cayman_pcie_gart_fini(rdev); @@ -1441,7 +1437,7 @@ void cayman_fini(struct radeon_device *rdev) si_rlc_fini(rdev); radeon_wb_fini(rdev); radeon_vm_manager_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); cayman_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index d06c8dd8ddfa..9524bd4aca75 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -3722,12 +3722,6 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) return r; } -void r100_ib_fini(struct radeon_device *rdev) -{ - radeon_ib_pool_suspend(rdev); - radeon_ib_pool_fini(rdev); -} - void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) { /* Shutdown CP we shouldn't need to do that but better be safe than @@ -3887,9 +3881,11 @@ static int r100_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_pool_start(rdev); - if (r) + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; + } r = radeon_ib_ring_tests(rdev); if (r) @@ -3930,7 +3926,6 @@ int r100_resume(struct radeon_device *rdev) int r100_suspend(struct radeon_device *rdev) { - radeon_ib_pool_suspend(rdev); r100_cp_disable(rdev); radeon_wb_disable(rdev); r100_irq_disable(rdev); @@ -3943,7 +3938,7 @@ void r100_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_gem_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); @@ -4050,20 +4045,14 @@ int r100_init(struct radeon_device *rdev) } r100_set_safe_registers(rdev); - r = radeon_ib_pool_init(rdev); rdev->accel_working = true; - if (r) { - dev_err(rdev->dev, "IB initialization failed (%d).\n", r); - rdev->accel_working = false; - } - r = r100_startup(rdev); if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 97722a33e513..b396e341a7b8 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -1391,9 +1391,11 @@ static int r300_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_pool_start(rdev); - if (r) + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; + } r = radeon_ib_ring_tests(rdev); if (r) @@ -1436,7 +1438,6 @@ int r300_resume(struct radeon_device *rdev) int r300_suspend(struct radeon_device *rdev) { - radeon_ib_pool_suspend(rdev); r100_cp_disable(rdev); radeon_wb_disable(rdev); r100_irq_disable(rdev); @@ -1451,7 +1452,7 @@ void r300_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_gem_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); @@ -1538,20 +1539,14 @@ int r300_init(struct radeon_device *rdev) } 
r300_set_reg_safe(rdev); - r = radeon_ib_pool_init(rdev); rdev->accel_working = true; - if (r) { - dev_err(rdev->dev, "IB initialization failed (%d).\n", r); - rdev->accel_working = false; - } - r = r300_startup(rdev); if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 99137be7a300..0062938a589c 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -275,9 +275,11 @@ static int r420_startup(struct radeon_device *rdev) } r420_cp_errata_init(rdev); - r = radeon_ib_pool_start(rdev); - if (r) + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; + } r = radeon_ib_ring_tests(rdev); if (r) @@ -324,7 +326,6 @@ int r420_resume(struct radeon_device *rdev) int r420_suspend(struct radeon_device *rdev) { - radeon_ib_pool_suspend(rdev); r420_cp_errata_fini(rdev); r100_cp_disable(rdev); radeon_wb_disable(rdev); @@ -340,7 +341,7 @@ void r420_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_gem_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); @@ -438,20 +439,14 @@ int r420_init(struct radeon_device *rdev) } r420_set_reg_safe(rdev); - r = radeon_ib_pool_init(rdev); rdev->accel_working = true; - if (r) { - dev_err(rdev->dev, "IB initialization failed (%d).\n", r); - rdev->accel_working = false; - } - r = r420_startup(rdev); if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index b5cf8375cd25..6df3e51acb9f 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -203,9 +203,11 @@ static int r520_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_pool_start(rdev); - if (r) + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; + } r = radeon_ib_ring_tests(rdev); if (r) @@ -311,20 +313,14 @@ int r520_init(struct radeon_device *rdev) return r; rv515_set_safe_registers(rdev); - r = radeon_ib_pool_init(rdev); rdev->accel_working = true; - if (r) { - dev_err(rdev->dev, "IB initialization failed (%d).\n", r); - rdev->accel_working = false; - } - r = r520_startup(rdev); if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index de4de2dac160..9750f538cc86 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2403,9 +2403,11 @@ int r600_startup(struct radeon_device *rdev) if (r) return r; - r = radeon_ib_pool_start(rdev); - if (r) + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; + 
} r = radeon_ib_ring_tests(rdev); if (r) @@ -2459,7 +2461,6 @@ int r600_resume(struct radeon_device *rdev) int r600_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); - radeon_ib_pool_suspend(rdev); r600_blit_suspend(rdev); r600_cp_stop(rdev); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; @@ -2542,20 +2543,14 @@ int r600_init(struct radeon_device *rdev) if (r) return r; - r = radeon_ib_pool_init(rdev); rdev->accel_working = true; - if (r) { - dev_err(rdev->dev, "IB initialization failed (%d).\n", r); - rdev->accel_working = false; - } - r = r600_startup(rdev); if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); r600_cp_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); r600_pcie_gart_fini(rdev); rdev->accel_working = false; @@ -2571,7 +2566,7 @@ void r600_fini(struct radeon_device *rdev) r600_cp_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); r600_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 44878738bcde..7df76b9cc3af 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -755,8 +755,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); int radeon_ib_pool_init(struct radeon_device *rdev); void radeon_ib_pool_fini(struct radeon_device *rdev); -int radeon_ib_pool_start(struct radeon_device *rdev); -int radeon_ib_pool_suspend(struct radeon_device *rdev); int radeon_ib_ring_tests(struct radeon_device *rdev); /* Ring access between begin & end cannot sleep */ int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp); diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 94c427ab0f5c..f4af24310438 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -103,7 +103,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev); void r100_pci_gart_disable(struct radeon_device *rdev); int r100_debugfs_mc_info_init(struct radeon_device *rdev); int r100_gui_wait_for_idle(struct radeon_device *rdev); -void r100_ib_fini(struct radeon_device *rdev); int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); void r100_irq_disable(struct radeon_device *rdev); void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 674aabac2212..087383408a8f 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -129,6 +129,12 @@ int radeon_ib_pool_init(struct radeon_device *rdev) if (r) { return r; } + + r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo); + if (r) { + return r; + } + rdev->ib_pool_ready = true; if (radeon_debugfs_sa_init(rdev)) { dev_err(rdev->dev, "failed to register debugfs file for SA\n"); @@ -139,21 +145,12 @@ int radeon_ib_pool_init(struct radeon_device *rdev) void radeon_ib_pool_fini(struct radeon_device *rdev) { if (rdev->ib_pool_ready) { + radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo); radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo); rdev->ib_pool_ready = false; } } -int radeon_ib_pool_start(struct radeon_device *rdev) -{ - return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo); -} - -int 
radeon_ib_pool_suspend(struct radeon_device *rdev) -{ - return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo); -} - int radeon_ib_ring_tests(struct radeon_device *rdev) { unsigned i; diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index a464eb5e2df2..aa26076ff468 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -426,9 +426,11 @@ static int rs400_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_pool_start(rdev); - if (r) + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; + } r = radeon_ib_ring_tests(rdev); if (r) @@ -470,7 +472,6 @@ int rs400_resume(struct radeon_device *rdev) int rs400_suspend(struct radeon_device *rdev) { - radeon_ib_pool_suspend(rdev); r100_cp_disable(rdev); radeon_wb_disable(rdev); r100_irq_disable(rdev); @@ -482,7 +483,7 @@ void rs400_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_gem_fini(rdev); rs400_gart_fini(rdev); radeon_irq_kms_fini(rdev); @@ -550,20 +551,14 @@ int rs400_init(struct radeon_device *rdev) return r; r300_set_reg_safe(rdev); - r = radeon_ib_pool_init(rdev); rdev->accel_working = true; - if (r) { - dev_err(rdev->dev, "IB initialization failed (%d).\n", r); - rdev->accel_working = false; - } - r = rs400_startup(rdev); if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); rs400_gart_fini(rdev); radeon_irq_kms_fini(rdev); rdev->accel_working = false; diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index e11bc4651784..6dad4e617727 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -907,9 +907,11 @@ static int rs600_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_pool_start(rdev); - if (r) + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; + } r = radeon_ib_ring_tests(rdev); if (r) @@ -955,7 +957,6 @@ int rs600_resume(struct radeon_device *rdev) int rs600_suspend(struct radeon_device *rdev) { - radeon_ib_pool_suspend(rdev); r600_audio_fini(rdev); r100_cp_disable(rdev); radeon_wb_disable(rdev); @@ -969,7 +970,7 @@ void rs600_fini(struct radeon_device *rdev) r600_audio_fini(rdev); r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_gem_fini(rdev); rs600_gart_fini(rdev); radeon_irq_kms_fini(rdev); @@ -1037,20 +1038,14 @@ int rs600_init(struct radeon_device *rdev) return r; rs600_set_safe_registers(rdev); - r = radeon_ib_pool_init(rdev); rdev->accel_working = true; - if (r) { - dev_err(rdev->dev, "IB initialization failed (%d).\n", r); - rdev->accel_working = false; - } - r = rs600_startup(rdev); if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); rs600_gart_fini(rdev); radeon_irq_kms_fini(rdev); rdev->accel_working = false; diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 159b6a43fda0..0c026b0210d4 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -637,9 +637,11 @@ static int rs690_startup(struct radeon_device *rdev) return r; } - r = 
radeon_ib_pool_start(rdev); - if (r) + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; + } r = radeon_ib_ring_tests(rdev); if (r) @@ -685,7 +687,6 @@ int rs690_resume(struct radeon_device *rdev) int rs690_suspend(struct radeon_device *rdev) { - radeon_ib_pool_suspend(rdev); r600_audio_fini(rdev); r100_cp_disable(rdev); radeon_wb_disable(rdev); @@ -699,7 +700,7 @@ void rs690_fini(struct radeon_device *rdev) r600_audio_fini(rdev); r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_gem_fini(rdev); rs400_gart_fini(rdev); radeon_irq_kms_fini(rdev); @@ -768,20 +769,14 @@ int rs690_init(struct radeon_device *rdev) return r; rs600_set_safe_registers(rdev); - r = radeon_ib_pool_init(rdev); rdev->accel_working = true; - if (r) { - dev_err(rdev->dev, "IB initialization failed (%d).\n", r); - rdev->accel_working = false; - } - r = rs690_startup(rdev); if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); rs400_gart_fini(rdev); radeon_irq_kms_fini(rdev); rdev->accel_working = false; diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 7f08cedb5333..01e9155541ea 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -408,9 +408,11 @@ static int rv515_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_pool_start(rdev); - if (r) + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; + } r = radeon_ib_ring_tests(rdev); if (r) @@ -469,7 +471,7 @@ void rv515_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_gem_fini(rdev); rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); @@ -543,20 +545,14 @@ int rv515_init(struct radeon_device *rdev) return r; rv515_set_safe_registers(rdev); - r = radeon_ib_pool_init(rdev); rdev->accel_working = true; - if (r) { - dev_err(rdev->dev, "IB initialization failed (%d).\n", r); - rdev->accel_working = false; - } - r = rv515_startup(rdev); if (r) { /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); radeon_agp_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 7e230f62f0fa..cc0ffb9be2bd 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -951,9 +951,11 @@ static int rv770_startup(struct radeon_device *rdev) if (r) return r; - r = radeon_ib_pool_start(rdev); - if (r) + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; + } r = radeon_ib_ring_tests(rdev); if (r) @@ -994,7 +996,6 @@ int rv770_resume(struct radeon_device *rdev) int rv770_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); - radeon_ib_pool_suspend(rdev); r600_blit_suspend(rdev); r700_cp_stop(rdev); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; @@ -1075,20 +1076,14 @@ int rv770_init(struct radeon_device *rdev) if (r) return r; - r = radeon_ib_pool_init(rdev); rdev->accel_working = true; - if (r) { - dev_err(rdev->dev, "IB initialization failed (%d).\n", r); - rdev->accel_working = 
false; - } - r = rv770_startup(rdev); if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); r700_cp_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); rdev->accel_working = false; @@ -1103,7 +1098,7 @@ void rv770_fini(struct radeon_device *rdev) r700_cp_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 78c790fec63f..40405d302723 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3750,9 +3750,11 @@ static int si_startup(struct radeon_device *rdev) if (r) return r; - r = radeon_ib_pool_start(rdev); - if (r) + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); return r; + } r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); if (r) { @@ -3807,7 +3809,6 @@ int si_resume(struct radeon_device *rdev) int si_suspend(struct radeon_device *rdev) { - radeon_ib_pool_suspend(rdev); radeon_vm_manager_suspend(rdev); #if 0 r600_blit_suspend(rdev); @@ -3900,12 +3901,7 @@ int si_init(struct radeon_device *rdev) if (r) return r; - r = radeon_ib_pool_init(rdev); rdev->accel_working = true; - if (r) { - dev_err(rdev->dev, "IB initialization failed (%d).\n", r); - rdev->accel_working = false; - } r = radeon_vm_manager_init(rdev); if (r) { dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); @@ -3918,7 +3914,7 @@ int si_init(struct radeon_device *rdev) si_irq_fini(rdev); si_rlc_fini(rdev); radeon_wb_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_vm_manager_fini(rdev); radeon_irq_kms_fini(rdev); si_pcie_gart_fini(rdev); @@ -3947,7 +3943,7 @@ void si_fini(struct radeon_device *rdev) si_rlc_fini(rdev); radeon_wb_fini(rdev); radeon_vm_manager_fini(rdev); - r100_ib_fini(rdev); + radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); si_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); -- cgit v1.2.3 From 6f72a631998d37673828b0e97c63dafc8e923382 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 5 Jul 2012 16:05:28 +0200 Subject: drm/radeon: remove r600_blit_suspend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just reinitialize the shader content on resume instead. 
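[Editorial sketch, not part of the patch] The blit_init changes below all follow the same allocate-and-pin-once pattern: the shader BO is created and pinned only on first init and stays pinned in VRAM, while the shader/state contents are rewritten unconditionally so a resume restores them without any unpin/repin cycle. A minimal sketch of that pattern, where copy_blit_shaders() is a hypothetical stand-in for the existing kmap/memcpy sequence and the radeon_bo_* calls mirror the hunks:

static int blit_shader_bo_init(struct radeon_device *rdev, unsigned obj_size)
{
	struct radeon_bo **obj = &rdev->r600_blit.shader_obj;
	int r;

	if (*obj == NULL) {
		/* first init only: create and pin the BO */
		r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM, NULL, obj);
		if (r)
			return r;

		r = radeon_bo_reserve(*obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(*obj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(*obj);
		if (r) {
			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
			return r;
		}
	}

	/* first init and every resume: always rewrite the shader contents */
	return copy_blit_shaders(rdev);	/* hypothetical helper */
}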
Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/evergreen.c | 1 - drivers/gpu/drm/radeon/evergreen_blit_kms.c | 40 ++++++++++++++--------------- drivers/gpu/drm/radeon/ni.c | 1 - drivers/gpu/drm/radeon/r600.c | 15 ----------- drivers/gpu/drm/radeon/r600_blit_kms.c | 40 ++++++++++++++--------------- drivers/gpu/drm/radeon/radeon.h | 2 -- drivers/gpu/drm/radeon/rv770.c | 1 - drivers/gpu/drm/radeon/si.c | 3 --- 8 files changed, 40 insertions(+), 63 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 64e06e67facd..82f7aea14a6e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -3139,7 +3139,6 @@ int evergreen_suspend(struct radeon_device *rdev) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r600_audio_fini(rdev); - r600_blit_suspend(rdev); r700_cp_stop(rdev); ring->ready = false; evergreen_irq_suspend(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index e512560ffc6f..89cb9feb5653 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -634,10 +634,6 @@ int evergreen_blit_init(struct radeon_device *rdev) rdev->r600_blit.max_dim = 16384; - /* pin copy shader into vram if already initialized */ - if (rdev->r600_blit.shader_obj) - goto done; - rdev->r600_blit.state_offset = 0; if (rdev->family < CHIP_CAYMAN) @@ -668,11 +664,26 @@ int evergreen_blit_init(struct radeon_device *rdev) obj_size += cayman_ps_size * 4; obj_size = ALIGN(obj_size, 256); - r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, - NULL, &rdev->r600_blit.shader_obj); - if (r) { - DRM_ERROR("evergreen failed to allocate shader\n"); - return r; + /* pin copy shader into vram if not already initialized */ + if (!rdev->r600_blit.shader_obj) { + r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, + RADEON_GEM_DOMAIN_VRAM, + NULL, &rdev->r600_blit.shader_obj); + if (r) { + DRM_ERROR("evergreen failed to allocate shader\n"); + return r; + } + + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->r600_blit.shader_gpu_addr); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + if (r) { + dev_err(rdev->dev, "(%d) pin blit object failed\n", r); + return r; + } } DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n", @@ -714,17 +725,6 @@ int evergreen_blit_init(struct radeon_device *rdev) radeon_bo_kunmap(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); -done: - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (unlikely(r != 0)) - return r; - r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->r600_blit.shader_gpu_addr); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); - if (r) { - dev_err(rdev->dev, "(%d) pin blit object failed\n", r); - return r; - } radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); return 0; } diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index fe553102df82..4004376362ee 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1316,7 +1316,6 @@ int cayman_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); radeon_vm_manager_suspend(rdev); - r600_blit_suspend(rdev); cayman_cp_enable(rdev, false); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 
evergreen_irq_suspend(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 9750f538cc86..af2f74a27081 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2307,20 +2307,6 @@ int r600_copy_blit(struct radeon_device *rdev, return 0; } -void r600_blit_suspend(struct radeon_device *rdev) -{ - int r; - - /* unpin shaders bo */ - if (rdev->r600_blit.shader_obj) { - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (!r) { - radeon_bo_unpin(rdev->r600_blit.shader_obj); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); - } - } -} - int r600_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t tiling_flags, uint32_t pitch, uint32_t offset, uint32_t obj_size) @@ -2461,7 +2447,6 @@ int r600_resume(struct radeon_device *rdev) int r600_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); - r600_blit_suspend(rdev); r600_cp_stop(rdev); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; r600_irq_suspend(rdev); diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 2b8d6418a30c..2bef8549ddfe 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -524,10 +524,6 @@ int r600_blit_init(struct radeon_device *rdev) rdev->r600_blit.max_dim = 8192; - /* pin copy shader into vram if already initialized */ - if (rdev->r600_blit.shader_obj) - goto done; - rdev->r600_blit.state_offset = 0; if (rdev->family >= CHIP_RV770) @@ -552,11 +548,26 @@ int r600_blit_init(struct radeon_device *rdev) obj_size += r6xx_ps_size * 4; obj_size = ALIGN(obj_size, 256); - r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, - NULL, &rdev->r600_blit.shader_obj); - if (r) { - DRM_ERROR("r600 failed to allocate shader\n"); - return r; + /* pin copy shader into vram if not already initialized */ + if (rdev->r600_blit.shader_obj == NULL) { + r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, + RADEON_GEM_DOMAIN_VRAM, + NULL, &rdev->r600_blit.shader_obj); + if (r) { + DRM_ERROR("r600 failed to allocate shader\n"); + return r; + } + + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->r600_blit.shader_gpu_addr); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + if (r) { + dev_err(rdev->dev, "(%d) pin blit object failed\n", r); + return r; + } } DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n", @@ -587,17 +598,6 @@ int r600_blit_init(struct radeon_device *rdev) radeon_bo_kunmap(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); -done: - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (unlikely(r != 0)) - return r; - r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->r600_blit.shader_gpu_addr); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); - if (r) { - dev_err(rdev->dev, "(%d) pin blit object failed\n", r); - return r; - } radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 7df76b9cc3af..8a8c3f82950e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -735,8 +735,6 @@ struct r600_blit { u32 state_len; }; -void r600_blit_suspend(struct radeon_device *rdev); - /* * SI RLC stuff */ diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index cc0ffb9be2bd..2004f0d94f24 
100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -996,7 +996,6 @@ int rv770_resume(struct radeon_device *rdev) int rv770_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); - r600_blit_suspend(rdev); r700_cp_stop(rdev); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; r600_irq_suspend(rdev); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 40405d302723..7c2618b9aa34 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3810,9 +3810,6 @@ int si_resume(struct radeon_device *rdev) int si_suspend(struct radeon_device *rdev) { radeon_vm_manager_suspend(rdev); -#if 0 - r600_blit_suspend(rdev); -#endif si_cp_enable(rdev, false); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; -- cgit v1.2.3 From c6105f249a637e1bb2b04b1cad7feaf507d06e8c Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 5 Jul 2012 14:32:00 +0200 Subject: drm/radeon: remove vm_manager start/suspend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just restore the page table instead. Addressing three problem with this change: 1. Calling vm_manager_suspend in the suspend path is problematic cause it wants to wait for the VM use to end, which in case of a lockup never happens. 2. In case of a locked up memory controller unbinding the VM seems to make it even more unstable, creating an unrecoverable lockup in the end. 3. If we want to backup/restore the leftover ring content we must not unbind VMs in between. Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/ni.c | 12 ++---- drivers/gpu/drm/radeon/radeon.h | 2 - drivers/gpu/drm/radeon/radeon_gart.c | 83 ++++++++++++++++++++++-------------- drivers/gpu/drm/radeon/si.c | 12 ++---- 4 files changed, 59 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 4004376362ee..ec5307c582f4 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1280,9 +1280,11 @@ static int cayman_startup(struct radeon_device *rdev) if (r) return r; - r = radeon_vm_manager_start(rdev); - if (r) + r = radeon_vm_manager_init(rdev); + if (r) { + dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); return r; + } r = r600_audio_init(rdev); if (r) @@ -1315,7 +1317,6 @@ int cayman_resume(struct radeon_device *rdev) int cayman_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); - radeon_vm_manager_suspend(rdev); cayman_cp_enable(rdev, false); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; evergreen_irq_suspend(rdev); @@ -1392,11 +1393,6 @@ int cayman_init(struct radeon_device *rdev) return r; rdev->accel_working = true; - r = radeon_vm_manager_init(rdev); - if (r) { - dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); - } - r = cayman_startup(rdev); if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 8a8c3f82950e..872270c9a0d0 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1759,8 +1759,6 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size */ int radeon_vm_manager_init(struct radeon_device *rdev); void radeon_vm_manager_fini(struct radeon_device *rdev); -int radeon_vm_manager_start(struct radeon_device *rdev); -int radeon_vm_manager_suspend(struct radeon_device *rdev); int radeon_vm_init(struct 
radeon_device *rdev, struct radeon_vm *vm); void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm); diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index ee11c5073726..56752da4b816 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -282,27 +282,58 @@ void radeon_gart_fini(struct radeon_device *rdev) * * TODO bind a default page at vm initialization for default address */ + int radeon_vm_manager_init(struct radeon_device *rdev) { + struct radeon_vm *vm; + struct radeon_bo_va *bo_va; int r; - rdev->vm_manager.enabled = false; + if (!rdev->vm_manager.enabled) { + /* mark first vm as always in use, it's the system one */ + r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, + rdev->vm_manager.max_pfn * 8, + RADEON_GEM_DOMAIN_VRAM); + if (r) { + dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", + (rdev->vm_manager.max_pfn * 8) >> 10); + return r; + } - /* mark first vm as always in use, it's the system one */ - r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, - rdev->vm_manager.max_pfn * 8, - RADEON_GEM_DOMAIN_VRAM); - if (r) { - dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", - (rdev->vm_manager.max_pfn * 8) >> 10); - return r; + r = rdev->vm_manager.funcs->init(rdev); + if (r) + return r; + + rdev->vm_manager.enabled = true; + + r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager); + if (r) + return r; } - r = rdev->vm_manager.funcs->init(rdev); - if (r == 0) - rdev->vm_manager.enabled = true; + /* restore page table */ + list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) { + if (vm->id == -1) + continue; - return r; + list_for_each_entry(bo_va, &vm->va, vm_list) { + struct ttm_mem_reg *mem = NULL; + if (bo_va->valid) + mem = &bo_va->bo->tbo.mem; + + bo_va->valid = false; + r = radeon_vm_bo_update_pte(rdev, vm, bo_va->bo, mem); + if (r) { + DRM_ERROR("Failed to update pte for vm %d!\n", vm->id); + } + } + + r = rdev->vm_manager.funcs->bind(rdev, vm, vm->id); + if (r) { + DRM_ERROR("Failed to bind vm %d!\n", vm->id); + } + } + return 0; } /* global mutex must be lock */ @@ -346,27 +377,12 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev, } void radeon_vm_manager_fini(struct radeon_device *rdev) -{ - if (rdev->vm_manager.sa_manager.bo == NULL) - return; - radeon_vm_manager_suspend(rdev); - rdev->vm_manager.funcs->fini(rdev); - radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager); - rdev->vm_manager.enabled = false; -} - -int radeon_vm_manager_start(struct radeon_device *rdev) -{ - if (rdev->vm_manager.sa_manager.bo == NULL) { - return -EINVAL; - } - return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager); -} - -int radeon_vm_manager_suspend(struct radeon_device *rdev) { struct radeon_vm *vm, *tmp; + if (!rdev->vm_manager.enabled) + return; + mutex_lock(&rdev->vm_manager.lock); /* unbind all active vm */ list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) { @@ -374,7 +390,10 @@ int radeon_vm_manager_suspend(struct radeon_device *rdev) } rdev->vm_manager.funcs->fini(rdev); mutex_unlock(&rdev->vm_manager.lock); - return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager); + + radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager); + radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager); + rdev->vm_manager.enabled = false; } /* global mutex must be locked */ diff --git 
a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 7c2618b9aa34..2b691abef4f7 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3777,9 +3777,11 @@ static int si_startup(struct radeon_device *rdev) return r; } - r = radeon_vm_manager_start(rdev); - if (r) + r = radeon_vm_manager_init(rdev); + if (r) { + dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); return r; + } return 0; } @@ -3809,7 +3811,6 @@ int si_resume(struct radeon_device *rdev) int si_suspend(struct radeon_device *rdev) { - radeon_vm_manager_suspend(rdev); si_cp_enable(rdev, false); rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; @@ -3899,11 +3900,6 @@ int si_init(struct radeon_device *rdev) return r; rdev->accel_working = true; - r = radeon_vm_manager_init(rdev); - if (r) { - dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); - } - r = si_startup(rdev); if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); -- cgit v1.2.3 From 04eb2206d8022dc4a1eadb5e9cc5122c84959881 Mon Sep 17 00:00:00 2001 From: Christian König Date: Sat, 7 Jul 2012 12:47:58 +0200 Subject: drm/radeon: move radeon_ib_ring_tests out of chipset code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Making it easier to control when it is executed. Signed-off-by: Christian König Reviewed-by: Michel Dänzer Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/evergreen.c | 4 ---- drivers/gpu/drm/radeon/ni.c | 4 ---- drivers/gpu/drm/radeon/r100.c | 4 ---- drivers/gpu/drm/radeon/r300.c | 4 ---- drivers/gpu/drm/radeon/r420.c | 4 ---- drivers/gpu/drm/radeon/r520.c | 4 ---- drivers/gpu/drm/radeon/r600.c | 4 ---- drivers/gpu/drm/radeon/radeon_device.c | 15 +++++++++++++++ drivers/gpu/drm/radeon/rs400.c | 4 ---- drivers/gpu/drm/radeon/rs600.c | 4 ---- drivers/gpu/drm/radeon/rs690.c | 4 ---- drivers/gpu/drm/radeon/rv515.c | 4 ---- drivers/gpu/drm/radeon/rv770.c | 4 ---- drivers/gpu/drm/radeon/si.c | 21 --------------------- 14 files changed, 15 insertions(+), 69 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 82f7aea14a6e..f39b900d46f9 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -3093,10 +3093,6 @@ static int evergreen_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_ring_tests(rdev); - if (r) - return r; - r = r600_audio_init(rdev); if (r) { DRM_ERROR("radeon: audio init failed\n"); diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index ec5307c582f4..f2afefb44b7c 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1276,10 +1276,6 @@ static int cayman_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_ring_tests(rdev); - if (r) - return r; - r = radeon_vm_manager_init(rdev); if (r) { dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 9524bd4aca75..e0f5ae895f07 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -3887,10 +3887,6 @@ static int r100_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_ring_tests(rdev); - if (r) - return r; - return 0; } diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index b396e341a7b8..646a1927dda7 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -1397,10 +1397,6 @@ static int 
r300_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_ring_tests(rdev); - if (r) - return r; - return 0; } diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 0062938a589c..f2f5bf6d339f 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -281,10 +281,6 @@ static int r420_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_ring_tests(rdev); - if (r) - return r; - return 0; } diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 6df3e51acb9f..079d3c52c08a 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -209,10 +209,6 @@ static int r520_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_ring_tests(rdev); - if (r) - return r; - return 0; } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index af2f74a27081..c808fa976d2d 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2395,10 +2395,6 @@ int r600_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_ring_tests(rdev); - if (r) - return r; - r = r600_audio_init(rdev); if (r) { DRM_ERROR("radeon: audio init failed\n"); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 254fdb4f73c9..bbd09718e956 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -822,6 +822,10 @@ int radeon_device_init(struct radeon_device *rdev, if (r) return r; + r = radeon_ib_ring_tests(rdev); + if (r) + DRM_ERROR("ib ring test failed (%d).\n", r); + if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { /* Acceleration not working on AGP card try again * with fallback to PCI or PCIE GART @@ -946,6 +950,7 @@ int radeon_resume_kms(struct drm_device *dev) { struct drm_connector *connector; struct radeon_device *rdev = dev->dev_private; + int r; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -960,6 +965,11 @@ int radeon_resume_kms(struct drm_device *dev) /* resume AGP if in use */ radeon_agp_resume(rdev); radeon_resume(rdev); + + r = radeon_ib_ring_tests(rdev); + if (r) + DRM_ERROR("ib ring test failed (%d).\n", r); + radeon_pm_resume(rdev); radeon_restore_bios_scratch_regs(rdev); @@ -999,6 +1009,11 @@ int radeon_gpu_reset(struct radeon_device *rdev) if (!r) { dev_info(rdev->dev, "GPU reset succeed\n"); radeon_resume(rdev); + + r = radeon_ib_ring_tests(rdev); + if (r) + DRM_ERROR("ib ring test failed (%d).\n", r); + radeon_restore_bios_scratch_regs(rdev); drm_helper_resume_force_mode(rdev->ddev); ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index aa26076ff468..2752f7f78237 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -432,10 +432,6 @@ static int rs400_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_ring_tests(rdev); - if (r) - return r; - return 0; } diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 6dad4e617727..5301b3df8466 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -913,10 +913,6 @@ static int rs600_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_ring_tests(rdev); - if (r) - return r; - r = r600_audio_init(rdev); if (r) { dev_err(rdev->dev, "failed initializing audio\n"); diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 0c026b0210d4..3b663fcfe061 100644 --- 
a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -643,10 +643,6 @@ static int rs690_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_ring_tests(rdev); - if (r) - return r; - r = r600_audio_init(rdev); if (r) { dev_err(rdev->dev, "failed initializing audio\n"); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 01e9155541ea..a12fbcc8ccb6 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -414,10 +414,6 @@ static int rv515_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_ring_tests(rdev); - if (r) - return r; - return 0; } diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 2004f0d94f24..b4b1256fb15b 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -957,10 +957,6 @@ static int rv770_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_ring_tests(rdev); - if (r) - return r; - r = r600_audio_init(rdev); if (r) { DRM_ERROR("radeon: audio init failed\n"); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 2b691abef4f7..f61b550f9efd 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3756,27 +3756,6 @@ static int si_startup(struct radeon_device *rdev) return r; } - r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); - if (r) { - DRM_ERROR("radeon: failed testing IB (%d) on CP ring 0\n", r); - rdev->accel_working = false; - return r; - } - - r = radeon_ib_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]); - if (r) { - DRM_ERROR("radeon: failed testing IB (%d) on CP ring 1\n", r); - rdev->accel_working = false; - return r; - } - - r = radeon_ib_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]); - if (r) { - DRM_ERROR("radeon: failed testing IB (%d) on CP ring 2\n", r); - rdev->accel_working = false; - return r; - } - r = radeon_vm_manager_init(rdev); if (r) { dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r); -- cgit v1.2.3 From 45df68035c4964d42ea3850980708ce8674f75b3 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 6 Jul 2012 16:22:55 +0200 Subject: drm/radeon: record what is next valid wptr for each ring v4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before emitting any indirect buffer, emit the offset of the next valid ring content if any. This allow code that want to resume ring to resume ring right after ib that caused GPU lockup. 
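[Editorial sketch, not part of the patch] The dword accounting behind the next_rptr values written in the hunks below: the breakdown of the packet sizes is an illustration, while the write sequence itself matches the evergreen hunk.

/*
 * next_rptr must point just past the IB dispatch, i.e. past this
 * SET_CONFIG_REG write and the INDIRECT_BUFFER packet that follows.
 */
uint32_t next_rptr = ring->wptr
		   + 3	/* SET_CONFIG_REG write: header, reg offset, value */
		   + 4;	/* INDIRECT_BUFFER packet: header, addr lo/hi, size */
			/* NI/SI use + 3 + 4 + 8 to also skip the surface
			 * sync emitted after the IB there (see the v3 note
			 * in the changelog below). */

radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (ring->rptr_save_reg -
			 PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, next_rptr);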
v2: use scratch registers instead of storing it into memory v3: skip over the surface sync for ni and si as well v4: use SET_CONFIG_REG instead of PACKET0 Signed-off-by: Jerome Glisse Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/evergreen.c | 10 +++++++++- drivers/gpu/drm/radeon/ni.c | 13 ++++++++++++- drivers/gpu/drm/radeon/r600.c | 20 ++++++++++++++++++-- drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_ring.c | 4 ++++ drivers/gpu/drm/radeon/rv770.c | 4 +++- drivers/gpu/drm/radeon/si.c | 24 +++++++++++++++++++++--- 7 files changed, 68 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f39b900d46f9..4b8e5c5fcf84 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1368,7 +1368,15 @@ void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) /* set to DX10/11 mode */ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0)); radeon_ring_write(ring, 1); - /* FIXME: implement */ + + if (ring->rptr_save_reg) { + uint32_t next_rptr = ring->wptr + 3 + 4; + radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(ring, ((ring->rptr_save_reg - + PACKET3_SET_CONFIG_REG_START) >> 2)); + radeon_ring_write(ring, next_rptr); + } + radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); radeon_ring_write(ring, #ifdef __BIG_ENDIAN diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index f2afefb44b7c..ddfef8cdd838 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -855,6 +855,15 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) /* set to DX10/11 mode */ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0)); radeon_ring_write(ring, 1); + + if (ring->rptr_save_reg) { + uint32_t next_rptr = ring->wptr + 3 + 4 + 8; + radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(ring, ((ring->rptr_save_reg - + PACKET3_SET_CONFIG_REG_START) >> 2)); + radeon_ring_write(ring, next_rptr); + } + radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); radeon_ring_write(ring, #ifdef __BIG_ENDIAN @@ -981,8 +990,10 @@ static int cayman_cp_start(struct radeon_device *rdev) static void cayman_cp_fini(struct radeon_device *rdev) { + struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; cayman_cp_enable(rdev, false); - radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); + radeon_ring_fini(rdev, ring); + radeon_scratch_free(rdev, ring->rptr_save_reg); } int cayman_cp_resume(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c808fa976d2d..3156d251b3c2 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2155,18 +2155,27 @@ int r600_cp_resume(struct radeon_device *rdev) void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size) { u32 rb_bufsz; + int r; /* Align ring size */ rb_bufsz = drm_order(ring_size / 8); ring_size = (1 << (rb_bufsz + 1)) * 4; ring->ring_size = ring_size; ring->align_mask = 16 - 1; + + r = radeon_scratch_get(rdev, &ring->rptr_save_reg); + if (r) { + DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); + ring->rptr_save_reg = 0; + } } void r600_cp_fini(struct radeon_device *rdev) { + struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r600_cp_stop(rdev); - radeon_ring_fini(rdev, 
&rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); + radeon_ring_fini(rdev, ring); + radeon_scratch_free(rdev, ring->rptr_save_reg); } @@ -2568,7 +2577,14 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) { struct radeon_ring *ring = &rdev->ring[ib->ring]; - /* FIXME: implement */ + if (ring->rptr_save_reg) { + uint32_t next_rptr = ring->wptr + 3 + 4; + radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(ring, ((ring->rptr_save_reg - + PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); + radeon_ring_write(ring, next_rptr); + } + radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); radeon_ring_write(ring, #ifdef __BIG_ENDIAN diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 872270c9a0d0..64d39adaad91 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -622,6 +622,7 @@ struct radeon_ring { unsigned rptr; unsigned rptr_offs; unsigned rptr_reg; + unsigned rptr_save_reg; unsigned wptr; unsigned wptr_old; unsigned wptr_reg; diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 087383408a8f..ce8eb9d5af5d 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -451,6 +451,10 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) count = (ring->ring_size / 4) - ring->ring_free_dw; seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg)); seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg)); + if (ring->rptr_save_reg) { + seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg, + RREG32(ring->rptr_save_reg)); + } seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr); seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr); seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index b4b1256fb15b..eb4704e72bdb 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -358,8 +358,10 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev) void r700_cp_fini(struct radeon_device *rdev) { + struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r700_cp_stop(rdev); - radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); + radeon_ring_fini(rdev, ring); + radeon_scratch_free(rdev, ring->rptr_save_reg); } /* diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index f61b550f9efd..53e313b05751 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1765,6 +1765,14 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) struct radeon_ring *ring = &rdev->ring[ib->ring]; u32 header; + if (ring->rptr_save_reg) { + uint32_t next_rptr = ring->wptr + 3 + 4 + 8; + radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(ring, ((ring->rptr_save_reg - + PACKET3_SET_CONFIG_REG_START) >> 2)); + radeon_ring_write(ring, next_rptr); + } + if (ib->is_const_ib) header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); else @@ -1917,10 +1925,20 @@ static int si_cp_start(struct radeon_device *rdev) static void si_cp_fini(struct radeon_device *rdev) { + struct radeon_ring *ring; si_cp_enable(rdev, false); - radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); - radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]); - radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]); + + ring = 
&rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; + radeon_ring_fini(rdev, ring); + radeon_scratch_free(rdev, ring->rptr_save_reg); + + ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; + radeon_ring_fini(rdev, ring); + radeon_scratch_free(rdev, ring->rptr_save_reg); + + ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; + radeon_ring_fini(rdev, ring); + radeon_scratch_free(rdev, ring->rptr_save_reg); } static int si_cp_resume(struct radeon_device *rdev) -- cgit v1.2.3 From 55d7c22192becd0ec827a6901899ff56fa985658 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 9 Jul 2012 11:52:44 +0200 Subject: drm/radeon: implement ring saving on reset v4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Try to save whatever is on the rings when we encounter an lockup. v2: Fix spelling error. Free saved ring data if reset fails. Add documentation for the new functions. v3: Some more spelling fixes v4: It doesn't make sense to save anything if all fences are signaled Signed-off-by: Christian König Reviewed-by: Michel Dänzer Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 4 ++ drivers/gpu/drm/radeon/radeon_device.c | 48 ++++++++++++++++---- drivers/gpu/drm/radeon/radeon_ring.c | 82 ++++++++++++++++++++++++++++++++++ 3 files changed, 126 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 64d39adaad91..6715e4c695fa 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -768,6 +768,10 @@ int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring); void radeon_ring_lockup_update(struct radeon_ring *ring); bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring); +unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring, + uint32_t **data); +int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, + unsigned size, uint32_t *data); int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size, unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index bbd09718e956..0302a9f3e674 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -996,7 +996,12 @@ int radeon_resume_kms(struct drm_device *dev) int radeon_gpu_reset(struct radeon_device *rdev) { - int r; + unsigned ring_sizes[RADEON_NUM_RINGS]; + uint32_t *ring_data[RADEON_NUM_RINGS]; + + bool saved = false; + + int i, r; int resched; down_write(&rdev->exclusive_lock); @@ -1005,20 +1010,47 @@ int radeon_gpu_reset(struct radeon_device *rdev) resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); radeon_suspend(rdev); + for (i = 0; i < RADEON_NUM_RINGS; ++i) { + ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i], + &ring_data[i]); + if (ring_sizes[i]) { + saved = true; + dev_info(rdev->dev, "Saved %d dwords of commands " + "on ring %d.\n", ring_sizes[i], i); + } + } + +retry: r = radeon_asic_reset(rdev); if (!r) { - dev_info(rdev->dev, "GPU reset succeed\n"); + dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n"); radeon_resume(rdev); + } - r = radeon_ib_ring_tests(rdev); - if (r) - DRM_ERROR("ib ring test failed (%d).\n", r); + radeon_restore_bios_scratch_regs(rdev); + drm_helper_resume_force_mode(rdev->ddev); - 
radeon_restore_bios_scratch_regs(rdev); - drm_helper_resume_force_mode(rdev->ddev); - ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); + if (!r) { + for (i = 0; i < RADEON_NUM_RINGS; ++i) { + radeon_ring_restore(rdev, &rdev->ring[i], + ring_sizes[i], ring_data[i]); + } + + r = radeon_ib_ring_tests(rdev); + if (r) { + dev_err(rdev->dev, "ib ring test failed (%d).\n", r); + if (saved) { + radeon_suspend(rdev); + goto retry; + } + } + } else { + for (i = 0; i < RADEON_NUM_RINGS; ++i) { + kfree(ring_data[i]); + } } + ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); if (r) { /* bad news, how to tell it to userspace ? */ dev_info(rdev->dev, "GPU reset failed\n"); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index ce8eb9d5af5d..75cbe4641138 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -362,6 +362,88 @@ bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *rin return false; } +/** + * radeon_ring_backup - Back up the content of a ring + * + * @rdev: radeon_device pointer + * @ring: the ring we want to back up + * + * Saves all unprocessed commits from a ring, returns the number of dwords saved. + */ +unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring, + uint32_t **data) +{ + unsigned size, ptr, i; + int ridx = radeon_ring_index(rdev, ring); + + /* just in case lock the ring */ + mutex_lock(&rdev->ring_lock); + *data = NULL; + + if (ring->ring_obj == NULL || !ring->rptr_save_reg) { + mutex_unlock(&rdev->ring_lock); + return 0; + } + + /* it doesn't make sense to save anything if all fences are signaled */ + if (!radeon_fence_count_emitted(rdev, ridx)) { + mutex_unlock(&rdev->ring_lock); + return 0; + } + + /* calculate the number of dw on the ring */ + ptr = RREG32(ring->rptr_save_reg); + size = ring->wptr + (ring->ring_size / 4); + size -= ptr; + size &= ring->ptr_mask; + if (size == 0) { + mutex_unlock(&rdev->ring_lock); + return 0; + } + + /* and then save the content of the ring */ + *data = kmalloc(size * 4, GFP_KERNEL); + for (i = 0; i < size; ++i) { + (*data)[i] = ring->ring[ptr++]; + ptr &= ring->ptr_mask; + } + + mutex_unlock(&rdev->ring_lock); + return size; +} + +/** + * radeon_ring_restore - append saved commands to the ring again + * + * @rdev: radeon_device pointer + * @ring: ring to append commands to + * @size: number of dwords we want to write + * @data: saved commands + * + * Allocates space on the ring and restore the previously saved commands. + */ +int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, + unsigned size, uint32_t *data) +{ + int i, r; + + if (!size || !data) + return 0; + + /* restore the saved ring content */ + r = radeon_ring_lock(rdev, ring, size); + if (r) + return r; + + for (i = 0; i < size; ++i) { + radeon_ring_write(ring, data[i]); + } + + radeon_ring_unlock_commit(rdev, ring); + kfree(data); + return 0; +} + int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size, unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop) -- cgit v1.2.3 From 29935554b384b1b3a7377d6f0b03b21d18a61683 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 30 May 2012 00:58:09 +0200 Subject: drm: Disallow DRM_IOCTL_MODESET_CTL for KMS drivers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DRM_IOCTL_MODESET_CTL must only be used for UMS drivers. 
Make it a no-op for KMS drivers. Signed-off-by: Laurent Pinchart Reviewed-by: Michel Dänzer Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_irq.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index c798eeae0a03..03f16f352fe2 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -974,7 +974,6 @@ EXPORT_SYMBOL(drm_vblank_off); * drm_vblank_pre_modeset - account for vblanks across mode sets * @dev: DRM device * @crtc: CRTC in question - * @post: post or pre mode set? * * Account for vblank events across mode setting events, which will likely * reset the hardware frame counter. @@ -1037,6 +1036,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, if (!dev->num_crtcs) return 0; + /* KMS drivers handle this internally */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return 0; + crtc = modeset->crtc; if (crtc >= dev->num_crtcs) return -EINVAL; -- cgit v1.2.3 From 246fa345a1b72fddb9b2b81f32abd615a5d1c0af Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 11 Jul 2012 17:12:11 +0200 Subject: drm/radeon: return an error if there is nothing to wait for MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise the sa managers out of memory handling doesn't work. Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_fence.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 76c5b22bfd22..7a181c386cbd 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -331,7 +331,7 @@ static int radeon_fence_wait_any_seq(struct radeon_device *rdev, /* nothing to wait for ? */ if (ring == RADEON_NUM_RINGS) { - return 0; + return -ENOENT; } while (!radeon_fence_any_seq_signaled(rdev, target_seq)) { -- cgit v1.2.3 From bfb38d35c1cacb182d8bbda23379397bffeafc8c Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 11 Jul 2012 21:07:57 +0200 Subject: drm/radeon: let sa manager block for fences to wait for v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise we can encounter out of memory situations under extreme load. v2: add documentation for the new function Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 2 +- drivers/gpu/drm/radeon/radeon_sa.c | 82 ++++++++++++++++++++++++++++---------- 2 files changed, 61 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 6715e4c695fa..2cb355bf2725 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -362,7 +362,7 @@ struct radeon_bo_list { * alignment). 
*/ struct radeon_sa_manager { - spinlock_t lock; + wait_queue_head_t wq; struct radeon_bo *bo; struct list_head *hole; struct list_head flist[RADEON_NUM_RINGS]; diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index 81dbb5b946ef..4e771240fdd0 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c @@ -54,7 +54,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev, { int i, r; - spin_lock_init(&sa_manager->lock); + init_waitqueue_head(&sa_manager->wq); sa_manager->bo = NULL; sa_manager->size = size; sa_manager->domain = domain; @@ -211,6 +211,39 @@ static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager, return false; } +/** + * radeon_sa_event - Check if we can stop waiting + * + * @sa_manager: pointer to the sa_manager + * @size: number of bytes we want to allocate + * @align: alignment we need to match + * + * Check if either there is a fence we can wait for or + * enough free memory to satisfy the allocation directly + */ +static bool radeon_sa_event(struct radeon_sa_manager *sa_manager, + unsigned size, unsigned align) +{ + unsigned soffset, eoffset, wasted; + int i; + + for (i = 0; i < RADEON_NUM_RINGS; ++i) { + if (!list_empty(&sa_manager->flist[i])) { + return true; + } + } + + soffset = radeon_sa_bo_hole_soffset(sa_manager); + eoffset = radeon_sa_bo_hole_eoffset(sa_manager); + wasted = (align - (soffset % align)) % align; + + if ((eoffset - soffset) >= (size + wasted)) { + return true; + } + + return false; +} + static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager, struct radeon_fence **fences, unsigned *tries) @@ -297,8 +330,8 @@ int radeon_sa_bo_new(struct radeon_device *rdev, INIT_LIST_HEAD(&(*sa_bo)->olist); INIT_LIST_HEAD(&(*sa_bo)->flist); - spin_lock(&sa_manager->lock); - do { + spin_lock(&sa_manager->wq.lock); + while(1) { for (i = 0; i < RADEON_NUM_RINGS; ++i) { fences[i] = NULL; tries[i] = 0; @@ -309,30 +342,34 @@ int radeon_sa_bo_new(struct radeon_device *rdev, if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo, size, align)) { - spin_unlock(&sa_manager->lock); + spin_unlock(&sa_manager->wq.lock); return 0; } /* see if we can skip over some allocations */ } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); - if (block) { - spin_unlock(&sa_manager->lock); - r = radeon_fence_wait_any(rdev, fences, false); - spin_lock(&sa_manager->lock); - if (r) { - /* if we have nothing to wait for we - are practically out of memory */ - if (r == -ENOENT) { - r = -ENOMEM; - } - goto out_err; - } + if (!block) { + break; + } + + spin_unlock(&sa_manager->wq.lock); + r = radeon_fence_wait_any(rdev, fences, false); + spin_lock(&sa_manager->wq.lock); + /* if we have nothing to wait for block */ + if (r == -ENOENT) { + r = wait_event_interruptible_locked( + sa_manager->wq, + radeon_sa_event(sa_manager, size, align) + ); + } + if (r) { + goto out_err; } - } while (block); + }; out_err: - spin_unlock(&sa_manager->lock); + spin_unlock(&sa_manager->wq.lock); kfree(*sa_bo); *sa_bo = NULL; return r; @@ -348,7 +385,7 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo, } sa_manager = (*sa_bo)->manager; - spin_lock(&sa_manager->lock); + spin_lock(&sa_manager->wq.lock); if (fence && !radeon_fence_signaled(fence)) { (*sa_bo)->fence = radeon_fence_ref(fence); list_add_tail(&(*sa_bo)->flist, @@ -356,7 +393,8 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo, } else { radeon_sa_bo_remove_locked(*sa_bo); } - 
spin_unlock(&sa_manager->lock); + wake_up_all_locked(&sa_manager->wq); + spin_unlock(&sa_manager->wq.lock); *sa_bo = NULL; } @@ -366,7 +404,7 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager, { struct radeon_sa_bo *i; - spin_lock(&sa_manager->lock); + spin_lock(&sa_manager->wq.lock); list_for_each_entry(i, &sa_manager->olist, olist) { if (&i->olist == sa_manager->hole) { seq_printf(m, ">"); @@ -381,6 +419,6 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager, } seq_printf(m, "\n"); } - spin_unlock(&sa_manager->lock); + spin_unlock(&sa_manager->wq.lock); } #endif -- cgit v1.2.3 From 4ef72566f1035fef5abd0913100d13746e066ee3 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 13 Jul 2012 13:06:00 +0200 Subject: drm/radeon: fix const IB handling v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Const IBs are executed on the CE not the CP, so we can't fence them in the normal way. So submit them directly before the IB instead, just as the documentation says. v2: keep the extra documentation Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/radeon/r100.c | 2 +- drivers/gpu/drm/radeon/r600.c | 2 +- drivers/gpu/drm/radeon/radeon.h | 3 ++- drivers/gpu/drm/radeon/radeon_cs.c | 25 ++++++++++++------------- drivers/gpu/drm/radeon/radeon_ring.c | 10 +++++++++- 5 files changed, 25 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e0f5ae895f07..4ee5a74dac22 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -3693,7 +3693,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) ib.ptr[6] = PACKET2(0); ib.ptr[7] = PACKET2(0); ib.length_dw = 8; - r = radeon_ib_schedule(rdev, &ib); + r = radeon_ib_schedule(rdev, &ib, NULL); if (r) { radeon_scratch_free(rdev, scratch); radeon_ib_free(rdev, &ib); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 3156d251b3c2..c2e506919995 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2619,7 +2619,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = radeon_ib_schedule(rdev, &ib); + r = radeon_ib_schedule(rdev, &ib, NULL); if (r) { radeon_scratch_free(rdev, scratch); radeon_ib_free(rdev, &ib); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2cb355bf2725..2d7f06c85336 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -751,7 +751,8 @@ struct si_rlc { int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib *ib, unsigned size); void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); -int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); +int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, + struct radeon_ib *const_ib); int radeon_ib_pool_init(struct radeon_device *rdev); void radeon_ib_pool_fini(struct radeon_device *rdev); int radeon_ib_ring_tests(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 553da67a4cdd..8a4c49ef0cc4 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -354,7 +354,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, } radeon_cs_sync_rings(parser); parser->ib.vm_id = 0; - 
r = radeon_ib_schedule(rdev, &parser->ib); + r = radeon_ib_schedule(rdev, &parser->ib, NULL); if (r) { DRM_ERROR("Failed to schedule IB !\n"); } @@ -452,25 +452,24 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, } radeon_cs_sync_rings(parser); + parser->ib.vm_id = vm->id; + /* ib pool is bind at 0 in virtual address space, + * so gpu_addr is the offset inside the pool bo + */ + parser->ib.gpu_addr = parser->ib.sa_bo->soffset; + if ((rdev->family >= CHIP_TAHITI) && (parser->chunk_const_ib_idx != -1)) { parser->const_ib.vm_id = vm->id; - /* ib pool is bind at 0 in virtual address space to gpu_addr is the - * offset inside the pool bo + /* ib pool is bind at 0 in virtual address space, + * so gpu_addr is the offset inside the pool bo */ parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset; - r = radeon_ib_schedule(rdev, &parser->const_ib); - if (r) - goto out; + r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib); + } else { + r = radeon_ib_schedule(rdev, &parser->ib, NULL); } - parser->ib.vm_id = vm->id; - /* ib pool is bind at 0 in virtual address space to gpu_addr is the - * offset inside the pool bo - */ - parser->ib.gpu_addr = parser->ib.sa_bo->soffset; - parser->ib.is_const_ib = false; - r = radeon_ib_schedule(rdev, &parser->ib); out: if (!r) { if (vm->fence) { diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 75cbe4641138..c48c35403774 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -74,7 +74,8 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) radeon_fence_unref(&ib->fence); } -int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) +int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, + struct radeon_ib *const_ib) { struct radeon_ring *ring = &rdev->ring[ib->ring]; bool need_sync = false; @@ -105,6 +106,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) if (!need_sync) { radeon_semaphore_free(rdev, &ib->semaphore, NULL); } + if (const_ib) { + radeon_ring_ib_execute(rdev, const_ib->ring, const_ib); + radeon_semaphore_free(rdev, &const_ib->semaphore, NULL); + } radeon_ring_ib_execute(rdev, ib->ring, ib); r = radeon_fence_emit(rdev, &ib->fence, ib->ring); if (r) { @@ -112,6 +117,9 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) radeon_ring_unlock_undo(rdev, ring); return r; } + if (const_ib) { + const_ib->fence = radeon_fence_ref(ib->fence); + } radeon_ring_unlock_commit(rdev, ring); return 0; } -- cgit v1.2.3 From a85a7da4c5922e6621526946d5a56b2194692fdd Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:29 -0400 Subject: drm/radeon: update ib_execute for SI (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When submitting a CONST_IB, emit a SWITCH_BUFFER packet before the CONST_IB. This isn't strictly necessary (the driver will work fine without it), but is good practice and allows for more flexible DE/CE sychronization options in the future. Current userspace drivers do not take advantage of the CE yet. 
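As a rough illustration only (not part of the patch), the ordering the reworked si_ring_ib_execute() path ends up emitting can be modelled in plain C; the emit() helper and the packet labels below are stand-ins for the real PACKET3 writes, not kernel symbols:

#include <stdbool.h>
#include <stdio.h>

static void emit(const char *pkt)
{
	printf("  %s\n", pkt);
}

static void ib_execute(bool is_const_ib, bool have_rptr_save)
{
	if (is_const_ib) {
		/* switch CE buffers, then point the CE at the const IB;
		 * no rptr save and no cache flush on this path */
		emit("SWITCH_BUFFER");
		emit("INDIRECT_BUFFER_CONST");
	} else {
		if (have_rptr_save)
			emit("SET_CONFIG_REG (save next rptr)");
		emit("INDIRECT_BUFFER");
		emit("SURFACE_SYNC (flush read caches for this vmid)");
	}
}

int main(void)
{
	printf("const IB:\n");
	ib_execute(true, false);
	printf("normal IB:\n");
	ib_execute(false, true);
	return 0;
}

The SWITCH_BUFFER write is confined to the const-IB branch, so the ordinary DE submission path is left unchanged by this patch.
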
v2: - clean up code flow a bit - no need to flush caches for CONST IB Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/si.c | 49 +++++++++++++++++++++++++------------------- drivers/gpu/drm/radeon/sid.h | 1 + 2 files changed, 29 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 53e313b05751..2b12cae52b29 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1765,18 +1765,23 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) struct radeon_ring *ring = &rdev->ring[ib->ring]; u32 header; - if (ring->rptr_save_reg) { - uint32_t next_rptr = ring->wptr + 3 + 4 + 8; - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(ring, ((ring->rptr_save_reg - - PACKET3_SET_CONFIG_REG_START) >> 2)); - radeon_ring_write(ring, next_rptr); - } + if (ib->is_const_ib) { + /* set switch buffer packet before const IB */ + radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + radeon_ring_write(ring, 0); - if (ib->is_const_ib) header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); - else + } else { + if (ring->rptr_save_reg) { + uint32_t next_rptr = ring->wptr + 3 + 4 + 8; + radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(ring, ((ring->rptr_save_reg - + PACKET3_SET_CONFIG_REG_START) >> 2)); + radeon_ring_write(ring, next_rptr); + } + header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); + } radeon_ring_write(ring, header); radeon_ring_write(ring, @@ -1787,18 +1792,20 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24)); - /* flush read cache over gart for this vmid */ - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); - radeon_ring_write(ring, ib->vm_id); - radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); - radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | - PACKET3_TC_ACTION_ENA | - PACKET3_SH_KCACHE_ACTION_ENA | - PACKET3_SH_ICACHE_ACTION_ENA); - radeon_ring_write(ring, 0xFFFFFFFF); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 10); /* poll interval */ + if (!ib->is_const_ib) { + /* flush read cache over gart for this vmid */ + radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); + radeon_ring_write(ring, ib->vm_id); + radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); + radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | + PACKET3_TC_ACTION_ENA | + PACKET3_SH_KCACHE_ACTION_ENA | + PACKET3_SH_ICACHE_ACTION_ENA); + radeon_ring_write(ring, 0xFFFFFFFF); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 10); /* poll interval */ + } } /* diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index db4067962868..7869089e8761 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -901,5 +901,6 @@ #define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88 #define PACKET3_SET_CE_DE_COUNTERS 0x89 #define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A +#define PACKET3_SWITCH_BUFFER 0x8B #endif -- cgit v1.2.3 From 8b25ed3482885e5f1dc65ace796e90f879d76c52 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:30 -0400 Subject: drm/radeon: remove radeon_ring_index() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Just store the index in the ring structure. Idea taken from one of Jerome's wip rptr patches. Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/r600.c | 11 +++++------ drivers/gpu/drm/radeon/radeon.h | 2 +- drivers/gpu/drm/radeon/radeon_device.c | 4 ++++ drivers/gpu/drm/radeon/radeon_ring.c | 20 ++----------------- drivers/gpu/drm/radeon/radeon_test.c | 35 +++++++++++++++------------------- 5 files changed, 27 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c2e506919995..9f24a804f6ea 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2198,7 +2198,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) { uint32_t scratch; uint32_t tmp = 0; - unsigned i, ridx = radeon_ring_index(rdev, ring); + unsigned i; int r; r = radeon_scratch_get(rdev, &scratch); @@ -2209,7 +2209,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) WREG32(scratch, 0xCAFEDEAD); r = radeon_ring_lock(rdev, ring, 3); if (r) { - DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r); + DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r); radeon_scratch_free(rdev, scratch); return r; } @@ -2224,10 +2224,10 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) DRM_UDELAY(1); } if (i < rdev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i); + DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n", - ridx, scratch, tmp); + ring->idx, scratch, tmp); r = -EINVAL; } radeon_scratch_free(rdev, scratch); @@ -2602,7 +2602,6 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) uint32_t tmp = 0; unsigned i; int r; - int ring_index = radeon_ring_index(rdev, ring); r = radeon_scratch_get(rdev, &scratch); if (r) { @@ -2610,7 +2609,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) return r; } WREG32(scratch, 0xCAFEDEAD); - r = radeon_ib_get(rdev, ring_index, &ib, 256); + r = radeon_ib_get(rdev, ring->idx, &ib, 256); if (r) { DRM_ERROR("radeon: failed to get ib (%d).\n", r); return r; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2d7f06c85336..be454725c377 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -638,6 +638,7 @@ struct radeon_ring { u32 ptr_reg_shift; u32 ptr_reg_mask; u32 nop; + u32 idx; }; /* @@ -757,7 +758,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev); void radeon_ib_pool_fini(struct radeon_device *rdev); int radeon_ib_ring_tests(struct radeon_device *rdev); /* Ring access between begin & end cannot sleep */ -int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp); void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0302a9f3e674..6c534f42ef7b 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -721,6 +721,10 @@ int radeon_device_init(struct radeon_device *rdev, rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT; rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 
rdev->accel_working = false; + /* set up ring ids */ + for (i = 0; i < RADEON_NUM_RINGS; i++) { + rdev->ring[i].idx = i; + } DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", radeon_family_name[rdev->family], pdev->vendor, pdev->device, diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index c48c35403774..c5828b909024 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -207,21 +207,6 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v) ring->ring_free_dw--; } -int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring) -{ - /* r1xx-r5xx only has CP ring */ - if (rdev->family < CHIP_R600) - return RADEON_RING_TYPE_GFX_INDEX; - - if (rdev->family >= CHIP_CAYMAN) { - if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]) - return CAYMAN_RING_TYPE_CP1_INDEX; - else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]) - return CAYMAN_RING_TYPE_CP2_INDEX; - } - return RADEON_RING_TYPE_GFX_INDEX; -} - void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring) { u32 rptr; @@ -253,7 +238,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi if (ndw < ring->ring_free_dw) { break; } - r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring)); + r = radeon_fence_wait_next_locked(rdev, ring->idx); if (r) return r; } @@ -382,7 +367,6 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring uint32_t **data) { unsigned size, ptr, i; - int ridx = radeon_ring_index(rdev, ring); /* just in case lock the ring */ mutex_lock(&rdev->ring_lock); @@ -394,7 +378,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring } /* it doesn't make sense to save anything if all fences are signaled */ - if (!radeon_fence_count_emitted(rdev, ridx)) { + if (!radeon_fence_count_emitted(rdev, ring->idx)) { mutex_unlock(&rdev->ring_lock); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index a94f66fb3b13..7c16540c10ff 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -229,8 +229,6 @@ void radeon_test_ring_sync(struct radeon_device *rdev, { struct radeon_fence *fence1 = NULL, *fence2 = NULL; struct radeon_semaphore *semaphore = NULL; - int ridxA = radeon_ring_index(rdev, ringA); - int ridxB = radeon_ring_index(rdev, ringB); int r; r = radeon_semaphore_create(rdev, &semaphore); @@ -241,18 +239,18 @@ void radeon_test_ring_sync(struct radeon_device *rdev, r = radeon_ring_lock(rdev, ringA, 64); if (r) { - DRM_ERROR("Failed to lock ring A %d\n", ridxA); + DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); goto out_cleanup; } - radeon_semaphore_emit_wait(rdev, ridxA, semaphore); - r = radeon_fence_emit(rdev, &fence1, ridxA); + radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); + r = radeon_fence_emit(rdev, &fence1, ringA->idx); if (r) { DRM_ERROR("Failed to emit fence 1\n"); radeon_ring_unlock_undo(rdev, ringA); goto out_cleanup; } - radeon_semaphore_emit_wait(rdev, ridxA, semaphore); - r = radeon_fence_emit(rdev, &fence2, ridxA); + radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); + r = radeon_fence_emit(rdev, &fence2, ringA->idx); if (r) { DRM_ERROR("Failed to emit fence 2\n"); radeon_ring_unlock_undo(rdev, ringA); @@ -272,7 +270,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, DRM_ERROR("Failed to lock ring B %p\n", ringB); goto out_cleanup; } - 
radeon_semaphore_emit_signal(rdev, ridxB, semaphore); + radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); radeon_ring_unlock_commit(rdev, ringB); r = radeon_fence_wait(fence1, false); @@ -293,7 +291,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, DRM_ERROR("Failed to lock ring B %p\n", ringB); goto out_cleanup; } - radeon_semaphore_emit_signal(rdev, ridxB, semaphore); + radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); radeon_ring_unlock_commit(rdev, ringB); r = radeon_fence_wait(fence2, false); @@ -322,9 +320,6 @@ void radeon_test_ring_sync2(struct radeon_device *rdev, { struct radeon_fence *fenceA = NULL, *fenceB = NULL; struct radeon_semaphore *semaphore = NULL; - int ridxA = radeon_ring_index(rdev, ringA); - int ridxB = radeon_ring_index(rdev, ringB); - int ridxC = radeon_ring_index(rdev, ringC); bool sigA, sigB; int i, r; @@ -336,11 +331,11 @@ void radeon_test_ring_sync2(struct radeon_device *rdev, r = radeon_ring_lock(rdev, ringA, 64); if (r) { - DRM_ERROR("Failed to lock ring A %d\n", ridxA); + DRM_ERROR("Failed to lock ring A %d\n", ringA->idx); goto out_cleanup; } - radeon_semaphore_emit_wait(rdev, ridxA, semaphore); - r = radeon_fence_emit(rdev, &fenceA, ridxA); + radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); + r = radeon_fence_emit(rdev, &fenceA, ringA->idx); if (r) { DRM_ERROR("Failed to emit sync fence 1\n"); radeon_ring_unlock_undo(rdev, ringA); @@ -350,11 +345,11 @@ void radeon_test_ring_sync2(struct radeon_device *rdev, r = radeon_ring_lock(rdev, ringB, 64); if (r) { - DRM_ERROR("Failed to lock ring B %d\n", ridxB); + DRM_ERROR("Failed to lock ring B %d\n", ringB->idx); goto out_cleanup; } - radeon_semaphore_emit_wait(rdev, ridxB, semaphore); - r = radeon_fence_emit(rdev, &fenceB, ridxB); + radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore); + r = radeon_fence_emit(rdev, &fenceB, ringB->idx); if (r) { DRM_ERROR("Failed to create sync fence 2\n"); radeon_ring_unlock_undo(rdev, ringB); @@ -378,7 +373,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev, DRM_ERROR("Failed to lock ring B %p\n", ringC); goto out_cleanup; } - radeon_semaphore_emit_signal(rdev, ridxC, semaphore); + radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); radeon_ring_unlock_commit(rdev, ringC); for (i = 0; i < 30; ++i) { @@ -404,7 +399,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev, DRM_ERROR("Failed to lock ring B %p\n", ringC); goto out_cleanup; } - radeon_semaphore_emit_signal(rdev, ridxC, semaphore); + radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); radeon_ring_unlock_commit(rdev, ringC); mdelay(1000); -- cgit v1.2.3 From 89d35807fb0fe53b84e88e759cc39107a6195e5f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:31 -0400 Subject: drm/radeon: update rptr saving logic for memory buffers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for using memory buffers rather than scratch registers. Some rings may not be able to write to scratch registers. 
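Whether the saved rptr comes from a scratch register or, with this change, from the per-ring writeback slot, radeon_ring_backup() turns it into a dword count the same way. A standalone sketch of that wraparound arithmetic follows; the input values are made up for illustration and the helper merely mirrors the kernel calculation:

#include <stdio.h>

/* Unprocessed dwords between the saved rptr and wptr, with wraparound. */
static unsigned int unprocessed_dw(unsigned int wptr, unsigned int saved_rptr,
				   unsigned int ring_size_bytes)
{
	unsigned int ring_dw = ring_size_bytes / 4;
	unsigned int ptr_mask = ring_dw - 1;	/* ring size is a power of two */

	return (wptr + ring_dw - saved_rptr) & ptr_mask;
}

int main(void)
{
	/* wptr has wrapped past the end of a 4 KiB (1024 dword) ring */
	printf("%u dwords to save\n", unprocessed_dw(16, 1000, 4096));
	return 0;
}
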
Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/evergreen.c | 10 +++++++++- drivers/gpu/drm/radeon/r600.c | 20 +++++++++++++++----- drivers/gpu/drm/radeon/radeon.h | 5 +++++ drivers/gpu/drm/radeon/radeon_ring.c | 31 +++++++++++++++++++++++++++++-- drivers/gpu/drm/radeon/si.c | 10 +++++++++- 5 files changed, 67 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4b8e5c5fcf84..870009ad5f56 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1364,17 +1364,25 @@ void evergreen_mc_program(struct radeon_device *rdev) void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) { struct radeon_ring *ring = &rdev->ring[ib->ring]; + u32 next_rptr; /* set to DX10/11 mode */ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0)); radeon_ring_write(ring, 1); if (ring->rptr_save_reg) { - uint32_t next_rptr = ring->wptr + 3 + 4; + next_rptr = ring->wptr + 3 + 4; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, ((ring->rptr_save_reg - PACKET3_SET_CONFIG_REG_START) >> 2)); radeon_ring_write(ring, next_rptr); + } else if (rdev->wb.enabled) { + next_rptr = ring->wptr + 5 + 4; + radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3)); + radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18)); + radeon_ring_write(ring, next_rptr); + radeon_ring_write(ring, 0); } radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 9f24a804f6ea..c5b2e9069362 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2163,10 +2163,12 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign ring->ring_size = ring_size; ring->align_mask = 16 - 1; - r = radeon_scratch_get(rdev, &ring->rptr_save_reg); - if (r) { - DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); - ring->rptr_save_reg = 0; + if (radeon_ring_supports_scratch_reg(rdev, ring)) { + r = radeon_scratch_get(rdev, &ring->rptr_save_reg); + if (r) { + DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); + ring->rptr_save_reg = 0; + } } } @@ -2576,13 +2578,21 @@ void r600_fini(struct radeon_device *rdev) void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) { struct radeon_ring *ring = &rdev->ring[ib->ring]; + u32 next_rptr; if (ring->rptr_save_reg) { - uint32_t next_rptr = ring->wptr + 3 + 4; + next_rptr = ring->wptr + 3 + 4; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, ((ring->rptr_save_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); radeon_ring_write(ring, next_rptr); + } else if (rdev->wb.enabled) { + next_rptr = ring->wptr + 5 + 4; + radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3)); + radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18)); + radeon_ring_write(ring, next_rptr); + radeon_ring_write(ring, 0); } radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index be454725c377..5431af292408 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -623,6 +623,8 @@ struct radeon_ring { unsigned rptr_offs; unsigned rptr_reg; unsigned 
rptr_save_reg; + u64 next_rptr_gpu_addr; + volatile u32 *next_rptr_cpu_addr; unsigned wptr; unsigned wptr_old; unsigned wptr_reg; @@ -758,6 +760,8 @@ int radeon_ib_pool_init(struct radeon_device *rdev); void radeon_ib_pool_fini(struct radeon_device *rdev); int radeon_ib_ring_tests(struct radeon_device *rdev); /* Ring access between begin & end cannot sleep */ +bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev, + struct radeon_ring *ring); void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); @@ -871,6 +875,7 @@ struct radeon_wb { }; #define RADEON_WB_SCRATCH_OFFSET 0 +#define RADEON_WB_RING0_NEXT_RPTR 256 #define RADEON_WB_CP_RPTR_OFFSET 1024 #define RADEON_WB_CP1_RPTR_OFFSET 1280 #define RADEON_WB_CP2_RPTR_OFFSET 1536 diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index c5828b909024..c1efde6d53d3 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -207,6 +207,19 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v) ring->ring_free_dw--; } +bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + switch (ring->idx) { + case RADEON_RING_TYPE_GFX_INDEX: + case CAYMAN_RING_TYPE_CP1_INDEX: + case CAYMAN_RING_TYPE_CP2_INDEX: + return true; + default: + return false; + } +} + void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring) { u32 rptr; @@ -372,7 +385,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring mutex_lock(&rdev->ring_lock); *data = NULL; - if (ring->ring_obj == NULL || !ring->rptr_save_reg) { + if (ring->ring_obj == NULL) { mutex_unlock(&rdev->ring_lock); return 0; } @@ -384,7 +397,16 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring } /* calculate the number of dw on the ring */ - ptr = RREG32(ring->rptr_save_reg); + if (ring->rptr_save_reg) + ptr = RREG32(ring->rptr_save_reg); + else if (rdev->wb.enabled) + ptr = le32_to_cpu(*ring->next_rptr_cpu_addr); + else { + /* no way to read back the next rptr */ + mutex_unlock(&rdev->ring_lock); + return 0; + } + size = ring->wptr + (ring->ring_size / 4); size -= ptr; size &= ring->ptr_mask; @@ -478,6 +500,11 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig } ring->ptr_mask = (ring->ring_size / 4) - 1; ring->ring_free_dw = ring->ring_size / 4; + if (rdev->wb.enabled) { + u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4); + ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index; + ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4]; + } if (radeon_debugfs_ring_init(rdev, ring)) { DRM_ERROR("Failed to register debugfs file for rings !\n"); } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 2b12cae52b29..7c3e330c7b59 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1772,12 +1772,20 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); } else { + u32 next_rptr; if (ring->rptr_save_reg) { - uint32_t next_rptr = ring->wptr + 3 + 4 + 8; + next_rptr = ring->wptr + 3 + 4 + 8; radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, ((ring->rptr_save_reg - PACKET3_SET_CONFIG_REG_START) >> 2)); 
radeon_ring_write(ring, next_rptr); + } else if (rdev->wb.enabled) { + next_rptr = ring->wptr + 5 + 4 + 8; + radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + radeon_ring_write(ring, (1 << 8)); + radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); + radeon_ring_write(ring, next_rptr); } header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); -- cgit v1.2.3 From c7eff978e0868ccad1ecbefcc342f6709f9f4789 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:32 -0400 Subject: drm/radeon: add rptr save support for r1xx-r5xx MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/r100.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 4ee5a74dac22..2e0a60328bd4 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -1060,6 +1060,14 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) } ring->ready = true; radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); + + if (radeon_ring_supports_scratch_reg(rdev, ring)) { + r = radeon_scratch_get(rdev, &ring->rptr_save_reg); + if (r) { + DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); + ring->rptr_save_reg = 0; + } + } return 0; } @@ -1070,6 +1078,7 @@ void r100_cp_fini(struct radeon_device *rdev) } /* Disable ring */ r100_cp_disable(rdev); + radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg); radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); DRM_INFO("radeon: cp finalized\n"); } @@ -3661,6 +3670,12 @@ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; + if (ring->rptr_save_reg) { + u32 next_rptr = ring->wptr + 2 + 3; + radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0)); + radeon_ring_write(ring, next_rptr); + } + radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1)); radeon_ring_write(ring, ib->gpu_addr); radeon_ring_write(ring, ib->length_dw); -- cgit v1.2.3 From 0c1951192a8855cc6185e6493078c20d97292a7f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:33 -0400 Subject: drm/radeon: document radeon_device.c (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds documentation to most of the functions in radeon_device.c v2: split out general descriptions as per Christian's comments. Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/radeon_device.c | 313 ++++++++++++++++++++++++++++++++- 1 file changed, 310 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 6c534f42ef7b..742af8244e89 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -96,8 +96,12 @@ static const char radeon_family_name[][16] = { "LAST", }; -/* - * Clear GPU surface registers. +/** + * radeon_surface_init - Clear GPU surface registers. + * + * @rdev: radeon_device pointer + * + * Clear GPU surface registers (r1xx-r5xx). */ void radeon_surface_init(struct radeon_device *rdev) { @@ -119,6 +123,13 @@ void radeon_surface_init(struct radeon_device *rdev) /* * GPU scratch registers helpers function. 
*/ +/** + * radeon_scratch_init - Init scratch register driver information. + * + * @rdev: radeon_device pointer + * + * Init CP scratch register driver information (r1xx-r5xx) + */ void radeon_scratch_init(struct radeon_device *rdev) { int i; @@ -136,6 +147,15 @@ void radeon_scratch_init(struct radeon_device *rdev) } } +/** + * radeon_scratch_get - Allocate a scratch register + * + * @rdev: radeon_device pointer + * @reg: scratch register mmio offset + * + * Allocate a CP scratch register for use by the driver (all asics). + * Returns 0 on success or -EINVAL on failure. + */ int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg) { int i; @@ -150,6 +170,14 @@ int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg) return -EINVAL; } +/** + * radeon_scratch_free - Free a scratch register + * + * @rdev: radeon_device pointer + * @reg: scratch register mmio offset + * + * Free a CP scratch register allocated for use by the driver (all asics) + */ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) { int i; @@ -162,6 +190,20 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) } } +/* + * radeon_wb_*() + * Writeback is the the method by which the the GPU updates special pages + * in memory with the status of certain GPU events (fences, ring pointers, + * etc.). + */ + +/** + * radeon_wb_disable - Disable Writeback + * + * @rdev: radeon_device pointer + * + * Disables Writeback (all asics). Used for suspend. + */ void radeon_wb_disable(struct radeon_device *rdev) { int r; @@ -177,6 +219,14 @@ void radeon_wb_disable(struct radeon_device *rdev) rdev->wb.enabled = false; } +/** + * radeon_wb_fini - Disable Writeback and free memory + * + * @rdev: radeon_device pointer + * + * Disables Writeback and frees the Writeback memory (all asics). + * Used at driver shutdown. + */ void radeon_wb_fini(struct radeon_device *rdev) { radeon_wb_disable(rdev); @@ -187,6 +237,15 @@ void radeon_wb_fini(struct radeon_device *rdev) } } +/** + * radeon_wb_init- Init Writeback driver info and allocate memory + * + * @rdev: radeon_device pointer + * + * Disables Writeback and frees the Writeback memory (all asics). + * Used at driver startup. + * Returns 0 on success or an -error on failure. + */ int radeon_wb_init(struct radeon_device *rdev) { int r; @@ -355,6 +414,15 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) /* * GPU helpers function. */ +/** + * radeon_card_posted - check if the hw has already been initialized + * + * @rdev: radeon_device pointer + * + * Check if the asic has been initialized (all asics). + * Used at driver startup. + * Returns true if initialized or false if not. + */ bool radeon_card_posted(struct radeon_device *rdev) { uint32_t reg; @@ -404,6 +472,14 @@ bool radeon_card_posted(struct radeon_device *rdev) } +/** + * radeon_update_bandwidth_info - update display bandwidth params + * + * @rdev: radeon_device pointer + * + * Used when sclk/mclk are switched or display modes are set. + * params are used to calculate display watermarks (all asics) + */ void radeon_update_bandwidth_info(struct radeon_device *rdev) { fixed20_12 a; @@ -424,6 +500,15 @@ void radeon_update_bandwidth_info(struct radeon_device *rdev) } } +/** + * radeon_boot_test_post_card - check and possibly initialize the hw + * + * @rdev: radeon_device pointer + * + * Check if the asic is initialized and if not, attempt to initialize + * it (all asics). + * Returns true if initialized or false if not. 
+ */ bool radeon_boot_test_post_card(struct radeon_device *rdev) { if (radeon_card_posted(rdev)) @@ -442,6 +527,16 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev) } } +/** + * radeon_dummy_page_init - init dummy page used by the driver + * + * @rdev: radeon_device pointer + * + * Allocate the dummy page used by the driver (all asics). + * This dummy page is used by the driver as a filler for gart entries + * when pages are taken out of the GART + * Returns 0 on sucess, -ENOMEM on failure. + */ int radeon_dummy_page_init(struct radeon_device *rdev) { if (rdev->dummy_page.page) @@ -460,6 +555,13 @@ int radeon_dummy_page_init(struct radeon_device *rdev) return 0; } +/** + * radeon_dummy_page_fini - free dummy page used by the driver + * + * @rdev: radeon_device pointer + * + * Frees the dummy page used by the driver (all asics). + */ void radeon_dummy_page_fini(struct radeon_device *rdev) { if (rdev->dummy_page.page == NULL) @@ -472,6 +574,23 @@ void radeon_dummy_page_fini(struct radeon_device *rdev) /* ATOM accessor methods */ +/* + * ATOM is an interpreted byte code stored in tables in the vbios. The + * driver registers callbacks to access registers and the interpreter + * in the driver parses the tables and executes then to program specific + * actions (set display modes, asic init, etc.). See radeon_atombios.c, + * atombios.h, and atom.c + */ + +/** + * cail_pll_read - read PLL register + * + * @info: atom card_info pointer + * @reg: PLL register offset + * + * Provides a PLL register accessor for the atom interpreter (r4xx+). + * Returns the value of the PLL register. + */ static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) { struct radeon_device *rdev = info->dev->dev_private; @@ -481,6 +600,15 @@ static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) return r; } +/** + * cail_pll_write - write PLL register + * + * @info: atom card_info pointer + * @reg: PLL register offset + * @val: value to write to the pll register + * + * Provides a PLL register accessor for the atom interpreter (r4xx+). + */ static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) { struct radeon_device *rdev = info->dev->dev_private; @@ -488,6 +616,15 @@ static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) rdev->pll_wreg(rdev, reg, val); } +/** + * cail_mc_read - read MC (Memory Controller) register + * + * @info: atom card_info pointer + * @reg: MC register offset + * + * Provides an MC register accessor for the atom interpreter (r4xx+). + * Returns the value of the MC register. + */ static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) { struct radeon_device *rdev = info->dev->dev_private; @@ -497,6 +634,15 @@ static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) return r; } +/** + * cail_mc_write - write MC (Memory Controller) register + * + * @info: atom card_info pointer + * @reg: MC register offset + * @val: value to write to the pll register + * + * Provides a MC register accessor for the atom interpreter (r4xx+). 
+ */ static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) { struct radeon_device *rdev = info->dev->dev_private; @@ -504,6 +650,15 @@ static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) rdev->mc_wreg(rdev, reg, val); } +/** + * cail_reg_write - write MMIO register + * + * @info: atom card_info pointer + * @reg: MMIO register offset + * @val: value to write to the pll register + * + * Provides a MMIO register accessor for the atom interpreter (r4xx+). + */ static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) { struct radeon_device *rdev = info->dev->dev_private; @@ -511,6 +666,15 @@ static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) WREG32(reg*4, val); } +/** + * cail_reg_read - read MMIO register + * + * @info: atom card_info pointer + * @reg: MMIO register offset + * + * Provides an MMIO register accessor for the atom interpreter (r4xx+). + * Returns the value of the MMIO register. + */ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) { struct radeon_device *rdev = info->dev->dev_private; @@ -520,6 +684,15 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) return r; } +/** + * cail_ioreg_write - write IO register + * + * @info: atom card_info pointer + * @reg: IO register offset + * @val: value to write to the pll register + * + * Provides a IO register accessor for the atom interpreter (r4xx+). + */ static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) { struct radeon_device *rdev = info->dev->dev_private; @@ -527,6 +700,15 @@ static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) WREG32_IO(reg*4, val); } +/** + * cail_ioreg_read - read IO register + * + * @info: atom card_info pointer + * @reg: IO register offset + * + * Provides an IO register accessor for the atom interpreter (r4xx+). + * Returns the value of the IO register. + */ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) { struct radeon_device *rdev = info->dev->dev_private; @@ -536,6 +718,16 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) return r; } +/** + * radeon_atombios_init - init the driver info and callbacks for atombios + * + * @rdev: radeon_device pointer + * + * Initializes the driver info and register access callbacks for the + * ATOM interpreter (r4xx+). + * Returns 0 on sucess, -ENOMEM on failure. + * Called at driver startup. + */ int radeon_atombios_init(struct radeon_device *rdev) { struct card_info *atom_card_info = @@ -569,6 +761,15 @@ int radeon_atombios_init(struct radeon_device *rdev) return 0; } +/** + * radeon_atombios_fini - free the driver info and callbacks for atombios + * + * @rdev: radeon_device pointer + * + * Frees the driver info and register access callbacks for the ATOM + * interpreter (r4xx+). + * Called at driver shutdown. + */ void radeon_atombios_fini(struct radeon_device *rdev) { if (rdev->mode_info.atom_context) { @@ -578,17 +779,50 @@ void radeon_atombios_fini(struct radeon_device *rdev) kfree(rdev->mode_info.atom_card_info); } +/* COMBIOS */ +/* + * COMBIOS is the bios format prior to ATOM. It provides + * command tables similar to ATOM, but doesn't have a unified + * parser. See radeon_combios.c + */ + +/** + * radeon_combios_init - init the driver info for combios + * + * @rdev: radeon_device pointer + * + * Initializes the driver info for combios (r1xx-r3xx). + * Returns 0 on sucess. + * Called at driver startup. 
+ */ int radeon_combios_init(struct radeon_device *rdev) { radeon_combios_initialize_bios_scratch_regs(rdev->ddev); return 0; } +/** + * radeon_combios_fini - free the driver info for combios + * + * @rdev: radeon_device pointer + * + * Frees the driver info for combios (r1xx-r3xx). + * Called at driver shutdown. + */ void radeon_combios_fini(struct radeon_device *rdev) { } -/* if we get transitioned to only one device, tak VGA back */ +/* if we get transitioned to only one device, take VGA back */ +/** + * radeon_vga_set_decode - enable/disable vga decode + * + * @cookie: radeon_device pointer + * @state: enable/disable vga decode + * + * Enable/disable vga decode (all asics). + * Returns VGA resource flags. + */ static unsigned int radeon_vga_set_decode(void *cookie, bool state) { struct radeon_device *rdev = cookie; @@ -600,6 +834,14 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state) return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } +/** + * radeon_check_arguments - validate module params + * + * @rdev: radeon_device pointer + * + * Validates certain module parameters and updates + * the associated values used by the driver (all asics). + */ void radeon_check_arguments(struct radeon_device *rdev) { /* vramlimit must be a power of two */ @@ -666,6 +908,15 @@ void radeon_check_arguments(struct radeon_device *rdev) } } +/** + * radeon_switcheroo_set_state - set switcheroo state + * + * @pdev: pci dev pointer + * @state: vga switcheroo state + * + * Callback for the switcheroo driver. Suspends or resumes the + * the asics before or after it is powered up using ACPI methods. + */ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); @@ -686,6 +937,15 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero } } +/** + * radeon_switcheroo_can_switch - see if switcheroo state can change + * + * @pdev: pci dev pointer + * + * Callback for the switcheroo driver. Check of the switcheroo + * state can be changed. + * Returns true if the state can be changed, false if not. + */ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); @@ -703,6 +963,18 @@ static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = { .can_switch = radeon_switcheroo_can_switch, }; +/** + * radeon_device_init - initialize the driver + * + * @rdev: radeon_device pointer + * @pdev: drm dev pointer + * @pdev: pci dev pointer + * @flags: driver flags + * + * Initializes the driver info and hw (all asics). + * Returns 0 for success or an error on failure. + * Called at driver startup. + */ int radeon_device_init(struct radeon_device *rdev, struct drm_device *ddev, struct pci_dev *pdev, @@ -855,6 +1127,14 @@ int radeon_device_init(struct radeon_device *rdev, static void radeon_debugfs_remove_files(struct radeon_device *rdev); +/** + * radeon_device_fini - tear down the driver + * + * @rdev: radeon_device pointer + * + * Tear down the driver info (all asics). + * Called at driver shutdown. + */ void radeon_device_fini(struct radeon_device *rdev) { DRM_INFO("radeon: finishing device.\n"); @@ -876,6 +1156,16 @@ void radeon_device_fini(struct radeon_device *rdev) /* * Suspend & resume. */ +/** + * radeon_suspend_kms - initiate device suspend + * + * @pdev: drm dev pointer + * @state: suspend state + * + * Puts the hw in the suspend state (all asics). + * Returns 0 for success or an error on failure. + * Called at driver suspend. 
+ */ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) { struct radeon_device *rdev; @@ -950,6 +1240,15 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) return 0; } +/** + * radeon_resume_kms - initiate device resume + * + * @pdev: drm dev pointer + * + * Bring the hw back to operating state (all asics). + * Returns 0 for success or an error on failure. + * Called at driver resume. + */ int radeon_resume_kms(struct drm_device *dev) { struct drm_connector *connector; @@ -998,6 +1297,14 @@ int radeon_resume_kms(struct drm_device *dev) return 0; } +/** + * radeon_gpu_reset - reset the asic + * + * @rdev: radeon device pointer + * + * Attempt the reset the GPU if it has hung (all asics). + * Returns 0 for success or an error on failure. + */ int radeon_gpu_reset(struct radeon_device *rdev) { unsigned ring_sizes[RADEON_NUM_RINGS]; -- cgit v1.2.3 From f482a1419545ded5f4ee748b44cba42d9a885dd4 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:34 -0400 Subject: drm/radeon: document radeon_kms.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds documentation to most of the functions in radeon_kms.c Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/radeon_kms.c | 126 ++++++++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 18b81d63ee7b..1d73f16b5d97 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -33,6 +33,17 @@ #include #include +/** + * radeon_driver_unload_kms - Main unload function for KMS. + * + * @dev: drm dev pointer + * + * This is the main unload function for KMS (all asics). + * It calls radeon_modeset_fini() to tear down the + * displays, and radeon_device_fini() to tear down + * the rest of the device (CP, writeback, etc.). + * Returns 0 on success. + */ int radeon_driver_unload_kms(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; @@ -46,6 +57,19 @@ int radeon_driver_unload_kms(struct drm_device *dev) return 0; } +/** + * radeon_driver_load_kms - Main load function for KMS. + * + * @dev: drm dev pointer + * @flags: device flags + * + * This is the main load function for KMS (all asics). + * It calls radeon_device_init() to set up the non-display + * parts of the chip (asic init, CP, writeback, etc.), and + * radeon_modeset_init() to set up the display parts + * (crtcs, encoders, hotplug detect, etc.). + * Returns 0 on success, error on failure. + */ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) { struct radeon_device *rdev; @@ -96,6 +120,16 @@ out: return r; } +/** + * radeon_set_filp_rights - Set filp right. + * + * @dev: drm dev pointer + * @owner: drm file + * @applier: drm file + * @value: value + * + * Sets the filp rights for the device (all asics). + */ static void radeon_set_filp_rights(struct drm_device *dev, struct drm_file **owner, struct drm_file *applier, @@ -118,6 +152,18 @@ static void radeon_set_filp_rights(struct drm_device *dev, /* * Userspace get information ioctl */ +/** + * radeon_info_ioctl - answer a device specific request. + * + * @rdev: radeon device pointer + * @data: request object + * @filp: drm filp + * + * This function is used to pass device specific parameters to the userspace + * drivers. Examples include: pci device id, pipeline parms, tiling params, + * etc. (all asics). + * Returns 0 on success, -EINVAL on failure. 
+ */ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; @@ -301,16 +347,40 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) /* * Outdated mess for old drm with Xorg being in charge (void function now). */ +/** + * radeon_driver_firstopen_kms - drm callback for first open + * + * @dev: drm dev pointer + * + * Nothing to be done for KMS (all asics). + * Returns 0 on success. + */ int radeon_driver_firstopen_kms(struct drm_device *dev) { return 0; } +/** + * radeon_driver_firstopen_kms - drm callback for last close + * + * @dev: drm dev pointer + * + * Switch vga switcheroo state after last close (all asics). + */ void radeon_driver_lastclose_kms(struct drm_device *dev) { vga_switcheroo_process_delayed_switch(); } +/** + * radeon_driver_open_kms - drm callback for open + * + * @dev: drm dev pointer + * @file_priv: drm file + * + * On device open, init vm on cayman+ (all asics). + * Returns 0 on success, error on failure. + */ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) { struct radeon_device *rdev = dev->dev_private; @@ -339,6 +409,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) return 0; } +/** + * radeon_driver_postclose_kms - drm callback for post close + * + * @dev: drm dev pointer + * @file_priv: drm file + * + * On device post close, tear down vm on cayman+ (all asics). + */ void radeon_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv) { @@ -354,6 +432,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev, } } +/** + * radeon_driver_preclose_kms - drm callback for pre close + * + * @dev: drm dev pointer + * @file_priv: drm file + * + * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx + * (all asics). + */ void radeon_driver_preclose_kms(struct drm_device *dev, struct drm_file *file_priv) { @@ -367,6 +454,15 @@ void radeon_driver_preclose_kms(struct drm_device *dev, /* * VBlank related functions. */ +/** + * radeon_get_vblank_counter_kms - get frame count + * + * @dev: drm dev pointer + * @crtc: crtc to get the frame count from + * + * Gets the frame count on the requested crtc (all asics). + * Returns frame count on success, -EINVAL on failure. + */ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) { struct radeon_device *rdev = dev->dev_private; @@ -379,6 +475,15 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) return radeon_get_vblank_counter(rdev, crtc); } +/** + * radeon_enable_vblank_kms - enable vblank interrupt + * + * @dev: drm dev pointer + * @crtc: crtc to enable vblank interrupt for + * + * Enable the interrupt on the requested crtc (all asics). + * Returns 0 on success, -EINVAL on failure. + */ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) { struct radeon_device *rdev = dev->dev_private; @@ -397,6 +502,14 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) return r; } +/** + * radeon_disable_vblank_kms - disable vblank interrupt + * + * @dev: drm dev pointer + * @crtc: crtc to disable vblank interrupt for + * + * Disable the interrupt on the requested crtc (all asics). 
+ */ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) { struct radeon_device *rdev = dev->dev_private; @@ -413,6 +526,19 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } +/** + * radeon_get_vblank_timestamp_kms - get vblank timestamp + * + * @dev: drm dev pointer + * @crtc: crtc to get the timestamp for + * @max_error: max error + * @vblank_time: time value + * @flags: flags passed to the driver + * + * Gets the timestamp on the requested crtc based on the + * scanout position. (all asics). + * Returns postive status flags on success, negative error on failure. + */ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, int *max_error, struct timeval *vblank_time, -- cgit v1.2.3 From b73ba98dcb9f8e148773c3810a522440484fe05a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:35 -0400 Subject: drm/radeon: document radeon_irq_kms.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds documentation to most of the functions in radeon_irq_kms.c Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/radeon_irq_kms.c | 150 ++++++++++++++++++++++++++++++++ 1 file changed, 150 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 6664514bbdca..afaa1727abd2 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -34,6 +34,15 @@ #define RADEON_WAIT_IDLE_TIMEOUT 200 +/** + * radeon_driver_irq_handler_kms - irq handler for KMS + * + * @DRM_IRQ_ARGS: args + * + * This is the irq handler for the radeon KMS driver (all asics). + * radeon_irq_process is a macro that points to the per-asic + * irq handler callback. + */ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -45,6 +54,17 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS) /* * Handle hotplug events outside the interrupt handler proper. */ +/** + * radeon_hotplug_work_func - display hotplug work handler + * + * @work: work struct + * + * This is the hot plug event work handler (all asics). + * The work gets scheduled from the irq handler if there + * was a hot plug interrupt. It walks the connector table + * and calls the hotplug handler for each one, then sends + * a drm hotplug event to alert userspace. + */ static void radeon_hotplug_work_func(struct work_struct *work) { struct radeon_device *rdev = container_of(work, struct radeon_device, @@ -61,6 +81,14 @@ static void radeon_hotplug_work_func(struct work_struct *work) drm_helper_hpd_irq_event(dev); } +/** + * radeon_driver_irq_preinstall_kms - drm irq preinstall callback + * + * @dev: drm dev pointer + * + * Gets the hw ready to enable irqs (all asics). + * This function disables all interrupt sources on the GPU. + */ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; @@ -85,12 +113,27 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) radeon_irq_process(rdev); } +/** + * radeon_driver_irq_postinstall_kms - drm irq preinstall callback + * + * @dev: drm dev pointer + * + * Handles stuff to be done after enabling irqs (all asics). + * Returns 0 on success. 
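The hotplug work handler described above is the usual "note the event in the irq handler, do the heavy lifting later in process context" pattern. A generic sketch of that pattern follows; the names are illustrative and not radeon's own.

#include <linux/workqueue.h>

struct example_dev {
	struct work_struct hotplug_work;
	/* ... */
};

static void example_hotplug_work(struct work_struct *work)
{
	struct example_dev *edev =
		container_of(work, struct example_dev, hotplug_work);

	/* process context: safe to sleep, re-probe connectors, notify userspace */
	(void)edev;
}

/* at init time:   INIT_WORK(&edev->hotplug_work, example_hotplug_work);  */
/* in the irq handler, when a hotplug status bit is set:                  */
/*                 schedule_work(&edev->hotplug_work);                    */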
+ */ int radeon_driver_irq_postinstall_kms(struct drm_device *dev) { dev->max_vblank_count = 0x001fffff; return 0; } +/** + * radeon_driver_irq_uninstall_kms - drm irq uninstall callback + * + * @dev: drm dev pointer + * + * This function disables all interrupt sources on the GPU (all asics). + */ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; @@ -116,6 +159,16 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } +/** + * radeon_msi_ok - asic specific msi checks + * + * @rdev: radeon device pointer + * + * Handles asic specific MSI checks to determine if + * MSIs should be enabled on a particular chip (all asics). + * Returns true if MSIs should be enabled, false if MSIs + * should not be enabled. + */ static bool radeon_msi_ok(struct radeon_device *rdev) { /* RV370/RV380 was first asic with MSI support */ @@ -168,6 +221,14 @@ static bool radeon_msi_ok(struct radeon_device *rdev) return true; } +/** + * radeon_irq_kms_init - init driver interrupt info + * + * @rdev: radeon device pointer + * + * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics). + * Returns 0 for success, error for failure. + */ int radeon_irq_kms_init(struct radeon_device *rdev) { int r = 0; @@ -200,6 +261,13 @@ int radeon_irq_kms_init(struct radeon_device *rdev) return 0; } +/** + * radeon_irq_kms_fini - tear down driver interrrupt info + * + * @rdev: radeon device pointer + * + * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics). + */ void radeon_irq_kms_fini(struct radeon_device *rdev) { drm_vblank_cleanup(rdev->ddev); @@ -212,6 +280,16 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) flush_work_sync(&rdev->hotplug_work); } +/** + * radeon_irq_kms_sw_irq_get - enable software interrupt + * + * @rdev: radeon device pointer + * @ring: ring whose interrupt you want to enable + * + * Enables the software interrupt for a specific ring (all asics). + * The software interrupt is generally used to signal a fence on + * a particular ring. + */ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring) { unsigned long irqflags; @@ -226,6 +304,16 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring) } } +/** + * radeon_irq_kms_sw_irq_put - disable software interrupt + * + * @rdev: radeon device pointer + * @ring: ring whose interrupt you want to disable + * + * Disables the software interrupt for a specific ring (all asics). + * The software interrupt is generally used to signal a fence on + * a particular ring. + */ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring) { unsigned long irqflags; @@ -240,6 +328,15 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring) } } +/** + * radeon_irq_kms_pflip_irq_get - enable pageflip interrupt + * + * @rdev: radeon device pointer + * @crtc: crtc whose interrupt you want to enable + * + * Enables the pageflip interrupt for a specific crtc (all asics). + * For pageflips we use the vblank interrupt source. + */ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) { unsigned long irqflags; @@ -257,6 +354,15 @@ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) } } +/** + * radeon_irq_kms_pflip_irq_put - disable pageflip interrupt + * + * @rdev: radeon device pointer + * @crtc: crtc whose interrupt you want to disable + * + * Disables the pageflip interrupt for a specific crtc (all asics). 
+ * For pageflips we use the vblank interrupt source. + */ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) { unsigned long irqflags; @@ -274,6 +380,14 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) } } +/** + * radeon_irq_kms_enable_afmt - enable audio format change interrupt + * + * @rdev: radeon device pointer + * @block: afmt block whose interrupt you want to enable + * + * Enables the afmt change interrupt for a specific afmt block (all asics). + */ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block) { unsigned long irqflags; @@ -285,6 +399,14 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block) } +/** + * radeon_irq_kms_disable_afmt - disable audio format change interrupt + * + * @rdev: radeon device pointer + * @block: afmt block whose interrupt you want to disable + * + * Disables the afmt change interrupt for a specific afmt block (all asics). + */ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block) { unsigned long irqflags; @@ -295,6 +417,14 @@ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block) spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } +/** + * radeon_irq_kms_enable_hpd - enable hotplug detect interrupt + * + * @rdev: radeon device pointer + * @hpd_mask: mask of hpd pins you want to enable. + * + * Enables the hotplug detect interrupt for a specific hpd pin (all asics). + */ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask) { unsigned long irqflags; @@ -307,6 +437,14 @@ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask) spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } +/** + * radeon_irq_kms_disable_hpd - disable hotplug detect interrupt + * + * @rdev: radeon device pointer + * @hpd_mask: mask of hpd pins you want to disable. + * + * Disables the hotplug detect interrupt for a specific hpd pin (all asics). + */ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask) { unsigned long irqflags; @@ -319,6 +457,18 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask) spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } +/** + * radeon_irq_kms_wait_gui_idle - waits for drawing engine to be idle + * + * @rdev: radeon device pointer + * + * Enabled the GUI idle interrupt and waits for it to fire (r6xx+). + * This is currently used to make sure the 3D engine is idle for power + * management, but should be replaces with proper fence waits. + * GUI idle interrupts don't work very well on pre-r6xx hw and it also + * does not take into account other aspects of the chip that may be busy. + * DO NOT USE GOING FORWARD. 
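The paired *_get()/*_put() helpers above (software interrupts, pageflip interrupts) suggest refcounted enable/disable semantics: the interrupt source is switched on for the first user and back off after the last one. A generic C11 sketch of that idea; radeon's actual bookkeeping may differ.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int irq_users;

static void example_irq_get(void (*program_hw)(bool enable))
{
	if (atomic_fetch_add(&irq_users, 1) == 0)
		program_hw(true);	/* first user: unmask the interrupt */
}

static void example_irq_put(void (*program_hw)(bool enable))
{
	if (atomic_fetch_sub(&irq_users, 1) == 1)
		program_hw(false);	/* last user gone: mask it again */
}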
+ */ int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev) { unsigned long irqflags; -- cgit v1.2.3 From abf1dc67cbab543636dd3c9aba60fd529628debe Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:36 -0400 Subject: drm/radeon: document radeon_asic.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds documentation to most of the functions in radeon_asic.c Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/radeon_asic.c | 46 ++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index f533df5f7d50..973417c4b014 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -40,6 +40,16 @@ /* * Registers accessors functions. */ +/** + * radeon_invalid_rreg - dummy reg read function + * + * @rdev: radeon device pointer + * @reg: offset of register + * + * Dummy register read function. Used for register blocks + * that certain asics don't have (all asics). + * Returns the value in the register. + */ static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg) { DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); @@ -47,6 +57,16 @@ static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg) return 0; } +/** + * radeon_invalid_wreg - dummy reg write function + * + * @rdev: radeon device pointer + * @reg: offset of register + * @v: value to write to the register + * + * Dummy register read function. Used for register blocks + * that certain asics don't have (all asics). + */ static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", @@ -54,6 +74,14 @@ static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32 BUG_ON(1); } +/** + * radeon_register_accessor_init - sets up the register accessor callbacks + * + * @rdev: radeon device pointer + * + * Sets up the register accessor callbacks for various register + * apertures. Not all asics have all apertures (all asics). + */ static void radeon_register_accessor_init(struct radeon_device *rdev) { rdev->mc_rreg = &radeon_invalid_rreg; @@ -102,6 +130,14 @@ static void radeon_register_accessor_init(struct radeon_device *rdev) /* helper to disable agp */ +/** + * radeon_agp_disable - AGP disable helper function + * + * @rdev: radeon device pointer + * + * Removes AGP flags and changes the gart callbacks on AGP + * cards when using the internal gart rather than AGP (all asics). + */ void radeon_agp_disable(struct radeon_device *rdev) { rdev->flags &= ~RADEON_IS_AGP; @@ -1608,6 +1644,16 @@ static struct radeon_asic si_asic = { }, }; +/** + * radeon_asic_init - register asic specific callbacks + * + * @rdev: radeon device pointer + * + * Registers the appropriate asic specific callbacks for each + * chip family. Also sets other asics specific info like the number + * of crtcs and the register aperture accessors (all asics). + * Returns 0 for success. 
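radeon_asic_init() and radeon_register_accessor_init(), documented above, amount to filling in a per-family table of function pointers, with dummy accessors standing in for register blocks a given asic lacks. A self-contained sketch of that shape (not the actual radeon_asic structure):

#include <stdint.h>
#include <stdio.h>

struct example_asic_ops {
	uint32_t (*mc_rreg)(uint32_t reg);
	void     (*mc_wreg)(uint32_t reg, uint32_t v);
};

static uint32_t invalid_rreg(uint32_t reg)
{
	fprintf(stderr, "invalid read of register 0x%04x on this asic\n", reg);
	return 0;
}

static void invalid_wreg(uint32_t reg, uint32_t v)
{
	fprintf(stderr, "invalid write of 0x%08x to register 0x%04x\n", v, reg);
}

/* every family starts out with the dummies and overrides only what it has */
static struct example_asic_ops example_ops = {
	.mc_rreg = invalid_rreg,
	.mc_wreg = invalid_wreg,
};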
+ */ int radeon_asic_init(struct radeon_device *rdev) { radeon_register_accessor_init(rdev); -- cgit v1.2.3 From d66b7ec24adce867890c57bf2a54323accc73a7e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:37 -0400 Subject: drm/radeon: document radeon_fence.c (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds documentation to most of the functions in radeon_fence.c v2: address Christian's comments: - split common concept description into it's own comment - fix description of intr parameter - Improve description of -EDEADLK error Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/radeon_fence.c | 242 +++++++++++++++++++++++++++++++++- 1 file changed, 240 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 7a181c386cbd..7b737b9339ad 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -40,6 +40,26 @@ #include "radeon.h" #include "radeon_trace.h" +/* + * Fences + * Fences mark an event in the GPUs pipeline and are used + * for GPU/CPU synchronization. When the fence is written, + * it is expected that all buffers associated with that fence + * are no longer in use by the associated ring on the GPU and + * that the the relevant GPU caches have been flushed. Whether + * we use a scratch register or memory location depends on the asic + * and whether writeback is enabled. + */ + +/** + * radeon_fence_write - write a fence value + * + * @rdev: radeon_device pointer + * @seq: sequence number to write + * @ring: ring index the fence is associated with + * + * Writes a fence value to memory or a scratch register (all asics). + */ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) { struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; @@ -50,6 +70,15 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) } } +/** + * radeon_fence_read - read a fence value + * + * @rdev: radeon_device pointer + * @ring: ring index the fence is associated with + * + * Reads a fence value from memory or a scratch register (all asics). + * Returns the value of the fence read from memory or register. + */ static u32 radeon_fence_read(struct radeon_device *rdev, int ring) { struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; @@ -63,6 +92,16 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring) return seq; } +/** + * radeon_fence_emit - emit a fence on the requested ring + * + * @rdev: radeon_device pointer + * @fence: radeon fence object + * @ring: ring index the fence is associated with + * + * Emits a fence command on the requested ring (all asics). + * Returns 0 on success, -ENOMEM on failure. + */ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring) @@ -81,6 +120,15 @@ int radeon_fence_emit(struct radeon_device *rdev, return 0; } +/** + * radeon_fence_process - process a fence + * + * @rdev: radeon_device pointer + * @ring: ring index the fence is associated with + * + * Checks the current fence value and wakes the fence queue + * if the sequence number has increased (all asics). + */ void radeon_fence_process(struct radeon_device *rdev, int ring) { uint64_t seq, last_seq; @@ -141,6 +189,13 @@ void radeon_fence_process(struct radeon_device *rdev, int ring) } } +/** + * radeon_fence_destroy - destroy a fence + * + * @kref: fence kref + * + * Frees the fence object (all asics). 
+ */ static void radeon_fence_destroy(struct kref *kref) { struct radeon_fence *fence; @@ -149,6 +204,20 @@ static void radeon_fence_destroy(struct kref *kref) kfree(fence); } +/** + * radeon_fence_seq_signaled - check if a fence sequeuce number has signaled + * + * @rdev: radeon device pointer + * @seq: sequence number + * @ring: ring index the fence is associated with + * + * Check if the last singled fence sequnce number is >= the requested + * sequence number (all asics). + * Returns true if the fence has signaled (current fence value + * is >= requested value) or false if it has not (current fence + * value is < the requested value. Helper function for + * radeon_fence_signaled(). + */ static bool radeon_fence_seq_signaled(struct radeon_device *rdev, u64 seq, unsigned ring) { @@ -163,6 +232,14 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev, return false; } +/** + * radeon_fence_signaled - check if a fence has signaled + * + * @fence: radeon fence object + * + * Check if the requested fence has signaled (all asics). + * Returns true if the fence has signaled or false if it has not. + */ bool radeon_fence_signaled(struct radeon_fence *fence) { if (!fence) { @@ -178,6 +255,24 @@ bool radeon_fence_signaled(struct radeon_fence *fence) return false; } +/** + * radeon_fence_wait_seq - wait for a specific sequence number + * + * @rdev: radeon device pointer + * @target_seq: sequence number we want to wait for + * @ring: ring index the fence is associated with + * @intr: use interruptable sleep + * @lock_ring: whether the ring should be locked or not + * + * Wait for the requested sequence number to be written (all asics). + * @intr selects whether to use interruptable (true) or non-interruptable + * (false) sleep when waiting for the sequence number. Helper function + * for radeon_fence_wait(), et al. + * Returns 0 if the sequence number has passed, error for all other cases. + * -EDEADLK is returned when a GPU lockup has been detected and the ring is + * marked as not ready so no further jobs get scheduled until a successful + * reset. + */ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq, unsigned ring, bool intr, bool lock_ring) { @@ -273,6 +368,17 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq, return 0; } +/** + * radeon_fence_wait - wait for a fence to signal + * + * @fence: radeon fence object + * @intr: use interruptable sleep + * + * Wait for the requested fence to signal (all asics). + * @intr selects whether to use interruptable (true) or non-interruptable + * (false) sleep when waiting for the fence. + * Returns 0 if the fence has passed, error for all other cases. + */ int radeon_fence_wait(struct radeon_fence *fence, bool intr) { int r; @@ -303,6 +409,20 @@ bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq) return false; } +/** + * radeon_fence_wait_any_seq - wait for a sequence number on any ring + * + * @rdev: radeon device pointer + * @target_seq: sequence number(s) we want to wait for + * @intr: use interruptable sleep + * + * Wait for the requested sequence number(s) to be written by any ring + * (all asics). Sequnce number array is indexed by ring id. + * @intr selects whether to use interruptable (true) or non-interruptable + * (false) sleep when waiting for the sequence number. Helper function + * for radeon_fence_wait_any(), et al. + * Returns 0 if the sequence number has passed, error for all other cases. 
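At its core, the signaled-or-not test described above is a comparison between the last sequence number the GPU has written back and the sequence number carried by the fence. A simplified model follows; the driver additionally re-reads the hardware fence value (radeon_fence_process()) before concluding a fence has not yet signaled.

#include <stdbool.h>
#include <stdint.h>

struct example_fence_driver {
	uint64_t last_signaled_seq;	/* highest value seen from the GPU */
};

struct example_fence {
	uint64_t seq;			/* value this fence will write when it signals */
};

static bool example_fence_signaled(const struct example_fence_driver *drv,
				   const struct example_fence *fence)
{
	return drv->last_signaled_seq >= fence->seq;
}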
+ */ static int radeon_fence_wait_any_seq(struct radeon_device *rdev, u64 *target_seq, bool intr) { @@ -412,6 +532,19 @@ static int radeon_fence_wait_any_seq(struct radeon_device *rdev, return 0; } +/** + * radeon_fence_wait_any - wait for a fence to signal on any ring + * + * @rdev: radeon device pointer + * @fences: radeon fence object(s) + * @intr: use interruptable sleep + * + * Wait for any requested fence to signal (all asics). Fence + * array is indexed by ring id. @intr selects whether to use + * interruptable (true) or non-interruptable (false) sleep when + * waiting for the fences. Used by the suballocator. + * Returns 0 if any fence has passed, error for all other cases. + */ int radeon_fence_wait_any(struct radeon_device *rdev, struct radeon_fence **fences, bool intr) @@ -442,7 +575,16 @@ int radeon_fence_wait_any(struct radeon_device *rdev, return 0; } -/* caller must hold ring lock */ +/** + * radeon_fence_wait_next_locked - wait for the next fence to signal + * + * @rdev: radeon device pointer + * @ring: ring index the fence is associated with + * + * Wait for the next fence on the requested ring to signal (all asics). + * Returns 0 if the next fence has passed, error for all other cases. + * Caller must hold ring lock. + */ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) { uint64_t seq; @@ -456,7 +598,16 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) return radeon_fence_wait_seq(rdev, seq, ring, false, false); } -/* caller must hold ring lock */ +/** + * radeon_fence_wait_empty_locked - wait for all fences to signal + * + * @rdev: radeon device pointer + * @ring: ring index the fence is associated with + * + * Wait for all fences on the requested ring to signal (all asics). + * Returns 0 if the fences have passed, error for all other cases. + * Caller must hold ring lock. + */ void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) { uint64_t seq = rdev->fence_drv[ring].sync_seq[ring]; @@ -479,12 +630,27 @@ void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) } } +/** + * radeon_fence_ref - take a ref on a fence + * + * @fence: radeon fence object + * + * Take a reference on a fence (all asics). + * Returns the fence. + */ struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) { kref_get(&fence->kref); return fence; } +/** + * radeon_fence_unref - remove a ref on a fence + * + * @fence: radeon fence object + * + * Remove a reference on a fence (all asics). + */ void radeon_fence_unref(struct radeon_fence **fence) { struct radeon_fence *tmp = *fence; @@ -495,6 +661,16 @@ void radeon_fence_unref(struct radeon_fence **fence) } } +/** + * radeon_fence_count_emitted - get the count of emitted fences + * + * @rdev: radeon device pointer + * @ring: ring index the fence is associated with + * + * Get the number of fences emitted on the requested ring (all asics). + * Returns the number of emitted fences on the ring. Used by the + * dynpm code to ring track activity. + */ unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring) { uint64_t emitted; @@ -512,6 +688,17 @@ unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring) return (unsigned)emitted; } +/** + * radeon_fence_need_sync - do we need a semaphore + * + * @fence: radeon fence object + * @dst_ring: which ring to check against + * + * Check if the fence needs to be synced against another ring + * (all asics). If so, we need to emit a semaphore. 
+ * Returns true if we need to sync with another ring, false if + * not. + */ bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring) { struct radeon_fence_driver *fdrv; @@ -533,6 +720,15 @@ bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring) return true; } +/** + * radeon_fence_note_sync - record the sync point + * + * @fence: radeon fence object + * @dst_ring: which ring to check against + * + * Note the sequence number at which point the fence will + * be synced with the requested ring (all asics). + */ void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring) { struct radeon_fence_driver *dst, *src; @@ -557,6 +753,18 @@ void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring) } } +/** + * radeon_fence_driver_start_ring - make the fence driver + * ready for use on the requested ring. + * + * @rdev: radeon device pointer + * @ring: ring index to start the fence driver on + * + * Make the fence driver ready for processing (all asics). + * Not all asics have all rings, so each asic will only + * start the fence driver on the rings it has. + * Returns 0 for success, errors for failure. + */ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) { uint64_t index; @@ -585,6 +793,16 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) return 0; } +/** + * radeon_fence_driver_init_ring - init the fence driver + * for the requested ring. + * + * @rdev: radeon device pointer + * @ring: ring index to start the fence driver on + * + * Init the fence driver for the requested ring (all asics). + * Helper function for radeon_fence_driver_init(). + */ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring) { int i; @@ -599,6 +817,18 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring) rdev->fence_drv[ring].initialized = false; } +/** + * radeon_fence_driver_init - init the fence driver + * for all possible rings. + * + * @rdev: radeon device pointer + * + * Init the fence driver for all possible rings (all asics). + * Not all asics have all rings, so each asic will only + * start the fence driver on the rings it has using + * radeon_fence_driver_start_ring(). + * Returns 0 for success. + */ int radeon_fence_driver_init(struct radeon_device *rdev) { int ring; @@ -613,6 +843,14 @@ int radeon_fence_driver_init(struct radeon_device *rdev) return 0; } +/** + * radeon_fence_driver_fini - tear down the fence driver + * for all possible rings. + * + * @rdev: radeon device pointer + * + * Tear down the fence driver for all possible rings (all asics). 
+ */ void radeon_fence_driver_fini(struct radeon_device *rdev) { int ring; -- cgit v1.2.3 From 75923280e580b76463a5572da457faa994a8b31d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:38 -0400 Subject: drm/radeon: document radeon_ring.c (v4) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds documentation to most of the functions in radeon_ring.c v2: adjust per Christian's suggestions v3: adjust per Christian's latest patches v4: adjust per my latest changes Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/radeon_ring.c | 208 ++++++++++++++++++++++++++++++++++- 1 file changed, 205 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index c1efde6d53d3..f2fc25de0b2f 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -35,10 +35,28 @@ #include "atom.h" /* - * IB. + * IB + * IBs (Indirect Buffers) and areas of GPU accessible memory where + * commands are stored. You can put a pointer to the IB in the + * command ring and the hw will fetch the commands from the IB + * and execute them. Generally userspace acceleration drivers + * produce command buffers which are send to the kernel and + * put in IBs for execution by the requested ring. */ int radeon_debugfs_sa_init(struct radeon_device *rdev); +/** + * radeon_ib_get - request an IB (Indirect Buffer) + * + * @rdev: radeon_device pointer + * @ring: ring index the IB is associated with + * @ib: IB object returned + * @size: requested IB size + * + * Request an IB (all asics). IBs are allocated using the + * suballocator. + * Returns 0 on success, error on failure. + */ int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib *ib, unsigned size) { @@ -67,6 +85,14 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, return 0; } +/** + * radeon_ib_free - free an IB (Indirect Buffer) + * + * @rdev: radeon_device pointer + * @ib: IB object to free + * + * Free an IB (all asics). + */ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) { radeon_semaphore_free(rdev, &ib->semaphore, ib->fence); @@ -74,6 +100,26 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) radeon_fence_unref(&ib->fence); } +/** + * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring + * + * @rdev: radeon_device pointer + * @ib: IB object to schedule + * @const_ib: Const IB to schedule (SI only) + * + * Schedule an IB on the associated ring (all asics). + * Returns 0 on success, error on failure. + * + * On SI, there are two parallel engines fed from the primary ring, + * the CE (Constant Engine) and the DE (Drawing Engine). Since + * resource descriptors have moved to memory, the CE allows you to + * prime the caches while the DE is updating register state so that + * the resource descriptors will be already in cache when the draw is + * processed. To accomplish this, the userspace driver submits two + * IBs, one for the CE and one for the DE. If there is a CE IB (called + * a CONST_IB), it will be put on the ring prior to the DE IB. Prior + * to SI there was just a DE IB. 
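The SI CONST_IB/DE IB ordering described above can be summarized in a short scheduling sketch; every name here is a hypothetical stand-in for the driver's own helpers.

struct example_ring;
struct example_ib;

int example_ring_lock(struct example_ring *ring);
void example_ring_unlock_commit(struct example_ring *ring);
void example_emit_ib(struct example_ring *ring, struct example_ib *ib);

static int example_ib_schedule(struct example_ring *ring,
			       struct example_ib *de_ib,
			       struct example_ib *const_ib)
{
	if (example_ring_lock(ring))
		return -1;

	if (const_ib)
		example_emit_ib(ring, const_ib);	/* CE IB first, primes the caches */
	example_emit_ib(ring, de_ib);			/* then the DE (draw) IB */

	example_ring_unlock_commit(ring);		/* bump wptr so the GPU starts fetching */
	return 0;
}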
+ */ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, struct radeon_ib *const_ib) { @@ -124,6 +170,15 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, return 0; } +/** + * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool + * + * @rdev: radeon_device pointer + * + * Initialize the suballocator to manage a pool of memory + * for use as IBs (all asics). + * Returns 0 on success, error on failure. + */ int radeon_ib_pool_init(struct radeon_device *rdev) { int r; @@ -150,6 +205,14 @@ int radeon_ib_pool_init(struct radeon_device *rdev) return 0; } +/** + * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool + * + * @rdev: radeon_device pointer + * + * Tear down the suballocator managing the pool of memory + * for use as IBs (all asics). + */ void radeon_ib_pool_fini(struct radeon_device *rdev) { if (rdev->ib_pool_ready) { @@ -159,6 +222,16 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) } } +/** + * radeon_ib_ring_tests - test IBs on the rings + * + * @rdev: radeon_device pointer + * + * Test an IB (Indirect Buffer) on each ring. + * If the test fails, disable the ring. + * Returns 0 on success, error if the primary GFX ring + * IB test fails. + */ int radeon_ib_ring_tests(struct radeon_device *rdev) { unsigned i; @@ -190,10 +263,28 @@ int radeon_ib_ring_tests(struct radeon_device *rdev) } /* - * Ring. + * Rings + * Most engines on the GPU are fed via ring buffers. Ring + * buffers are areas of GPU accessible memory that the host + * writes commands into and the GPU reads commands out of. + * There is a rptr (read pointer) that determines where the + * GPU is currently reading, and a wptr (write pointer) + * which determines where the host has written. When the + * pointers are equal, the ring is idle. When the host + * writes commands to the ring buffer, it increments the + * wptr. The GPU then starts fetching commands and executes + * them until the pointers are equal again. */ int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring); +/** + * radeon_ring_write - write a value to the ring + * + * @ring: radeon_ring structure holding ring information + * @v: dword (dw) value to write + * + * Write a value to the requested ring buffer (all asics). + */ void radeon_ring_write(struct radeon_ring *ring, uint32_t v) { #if DRM_DEBUG_CODE @@ -207,6 +298,16 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v) ring->ring_free_dw--; } +/** + * radeon_ring_supports_scratch_reg - check if the ring supports + * writing to scratch registers + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Check if a specific ring supports writing to scratch registers (all asics). + * Returns true if the ring supports writing to scratch regs, false if not. + */ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev, struct radeon_ring *ring) { @@ -220,6 +321,14 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev, } } +/** + * radeon_ring_free_size - update the free size + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Update the free dw slots in the ring buffer (all asics). 
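The rptr/wptr arithmetic from the ring overview above is easy to model. A self-contained sketch, assuming a power-of-two ring size counted in dwords and the common "keep one slot unused" convention (the driver's own bookkeeping differs in detail):

#include <stdint.h>
#include <stdio.h>

static uint32_t example_ring_free_dw(uint32_t rptr, uint32_t wptr,
				     uint32_t ring_size)
{
	/* one slot stays unused so a full ring is distinguishable from an empty one */
	return (rptr + ring_size - wptr - 1) & (ring_size - 1);
}

int main(void)
{
	uint32_t size = 1024;	/* dwords */

	printf("idle ring:       %u dw free\n", example_ring_free_dw(0, 0, size));
	printf("512 dw written:  %u dw free\n", example_ring_free_dw(0, 512, size));
	return 0;
}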
+ */ void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring) { u32 rptr; @@ -238,7 +347,16 @@ void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring) } } - +/** + * radeon_ring_alloc - allocate space on the ring buffer + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * @ndw: number of dwords to allocate in the ring buffer + * + * Allocate @ndw dwords in the ring buffer (all asics). + * Returns 0 on success, error on failure. + */ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw) { int r; @@ -260,6 +378,17 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi return 0; } +/** + * radeon_ring_lock - lock the ring and allocate space on it + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * @ndw: number of dwords to allocate in the ring buffer + * + * Lock the ring and allocate @ndw dwords in the ring buffer + * (all asics). + * Returns 0 on success, error on failure. + */ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw) { int r; @@ -273,6 +402,16 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig return 0; } +/** + * radeon_ring_commit - tell the GPU to execute the new + * commands on the ring buffer + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Update the wptr (write pointer) to tell the GPU to + * execute new commands on the ring buffer (all asics). + */ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) { /* We pad to match fetch size */ @@ -284,23 +423,55 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) (void)RREG32(ring->wptr_reg); } +/** + * radeon_ring_unlock_commit - tell the GPU to execute the new + * commands on the ring buffer and unlock it + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Call radeon_ring_commit() then unlock the ring (all asics). + */ void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring) { radeon_ring_commit(rdev, ring); mutex_unlock(&rdev->ring_lock); } +/** + * radeon_ring_undo - reset the wptr + * + * @ring: radeon_ring structure holding ring information + * + * Reset the driver's copy of the wtpr (all asics). + */ void radeon_ring_undo(struct radeon_ring *ring) { ring->wptr = ring->wptr_old; } +/** + * radeon_ring_unlock_undo - reset the wptr and unlock the ring + * + * @ring: radeon_ring structure holding ring information + * + * Call radeon_ring_undo() then unlock the ring (all asics). + */ void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring) { radeon_ring_undo(ring); mutex_unlock(&rdev->ring_lock); } +/** + * radeon_ring_force_activity - add some nop packets to the ring + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Add some nop packets to the ring to force activity (all asics). + * Used for lockup detection to see if the rptr is advancing. 
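radeon_ring_lockup_update() and radeon_ring_force_activity(), documented above, feed a simple heuristic: if the rptr stops advancing for too long while work is queued, the ring is considered hung. A self-contained sketch of that heuristic:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct example_lockup_state {
	uint32_t last_rptr;
	time_t   last_progress;
};

static bool example_ring_is_hung(struct example_lockup_state *st,
				 uint32_t rptr, bool work_pending,
				 int timeout_seconds)
{
	time_t now = time(NULL);

	if (rptr != st->last_rptr || !work_pending) {
		st->last_rptr = rptr;		/* progress (or idle): reset the clock */
		st->last_progress = now;
		return false;
	}
	return (now - st->last_progress) >= timeout_seconds;
}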
+ */ void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring) { int r; @@ -315,6 +486,13 @@ void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring * } } +/** + * radeon_ring_force_activity - update lockup variables + * + * @ring: radeon_ring structure holding ring information + * + * Update the last rptr value and timestamp (all asics). + */ void radeon_ring_lockup_update(struct radeon_ring *ring) { ring->last_rptr = ring->rptr; @@ -458,6 +636,22 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, return 0; } +/** + * radeon_ring_init - init driver ring struct. + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * @ring_size: size of the ring + * @rptr_offs: offset of the rptr writeback location in the WB buffer + * @rptr_reg: MMIO offset of the rptr register + * @wptr_reg: MMIO offset of the wptr register + * @ptr_reg_shift: bit offset of the rptr/wptr values + * @ptr_reg_mask: bit mask of the rptr/wptr values + * @nop: nop packet for this ring + * + * Initialize the driver information for the selected ring (all asics). + * Returns 0 on success, error on failure. + */ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size, unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop) @@ -511,6 +705,14 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig return 0; } +/** + * radeon_ring_fini - tear down the driver ring struct. + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Tear down the driver information for the selected ring (all asics). + */ void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring) { int r; -- cgit v1.2.3 From 03eec93bbc944ad4e467e083dd768d92d00213f0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:39 -0400 Subject: drm/radeon: document non-VM functions in radeon_gart.c (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Document the non-VM functions in radeon_gart.c v2: adjust per Christian's suggestions Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/radeon_gart.c | 125 ++++++++++++++++++++++++++++++++++- 1 file changed, 122 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 56752da4b816..12135ec94e8d 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -30,9 +30,39 @@ #include "radeon.h" #include "radeon_reg.h" +/* + * GART + * The GART (Graphics Aperture Remapping Table) is an aperture + * in the GPU's address space. System pages can be mapped into + * the aperture and look like contiguous pages from the GPU's + * perspective. A page table maps the pages in the aperture + * to the actual backing pages in system memory. + * + * Radeon GPUs support both an internal GART, as described above, + * and AGP. AGP works similarly, but the GART table is configured + * and maintained by the northbridge rather than the driver. + * Radeon hw has a separate AGP aperture that is programmed to + * point to the AGP aperture provided by the northbridge and the + * requests are passed through to the northbridge aperture. + * Both AGP and internal GART can be used at the same time, however + * that is not currently supported by the driver. 
+ * + * This file handles the common internal GART management. + */ + /* * Common GART table functions. */ +/** + * radeon_gart_table_ram_alloc - allocate system ram for gart page table + * + * @rdev: radeon_device pointer + * + * Allocate system memory for GART page table + * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the + * gart table to be in system memory. + * Returns 0 for success, -ENOMEM for failure. + */ int radeon_gart_table_ram_alloc(struct radeon_device *rdev) { void *ptr; @@ -54,6 +84,15 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev) return 0; } +/** + * radeon_gart_table_ram_free - free system ram for gart page table + * + * @rdev: radeon_device pointer + * + * Free system memory for GART page table + * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the + * gart table to be in system memory. + */ void radeon_gart_table_ram_free(struct radeon_device *rdev) { if (rdev->gart.ptr == NULL) { @@ -73,6 +112,16 @@ void radeon_gart_table_ram_free(struct radeon_device *rdev) rdev->gart.table_addr = 0; } +/** + * radeon_gart_table_vram_alloc - allocate vram for gart page table + * + * @rdev: radeon_device pointer + * + * Allocate video memory for GART page table + * (pcie r4xx, r5xx+). These asics require the + * gart table to be in video memory. + * Returns 0 for success, error for failure. + */ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) { int r; @@ -88,6 +137,16 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) return 0; } +/** + * radeon_gart_table_vram_pin - pin gart page table in vram + * + * @rdev: radeon_device pointer + * + * Pin the GART page table in vram so it will not be moved + * by the memory manager (pcie r4xx, r5xx+). These asics require the + * gart table to be in video memory. + * Returns 0 for success, error for failure. + */ int radeon_gart_table_vram_pin(struct radeon_device *rdev) { uint64_t gpu_addr; @@ -110,6 +169,14 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) return r; } +/** + * radeon_gart_table_vram_unpin - unpin gart page table in vram + * + * @rdev: radeon_device pointer + * + * Unpin the GART page table in vram (pcie r4xx, r5xx+). + * These asics require the gart table to be in video memory. + */ void radeon_gart_table_vram_unpin(struct radeon_device *rdev) { int r; @@ -126,6 +193,15 @@ void radeon_gart_table_vram_unpin(struct radeon_device *rdev) } } +/** + * radeon_gart_table_vram_free - free gart page table vram + * + * @rdev: radeon_device pointer + * + * Free the video memory used for the GART page table + * (pcie r4xx, r5xx+). These asics require the gart table to + * be in video memory. + */ void radeon_gart_table_vram_free(struct radeon_device *rdev) { if (rdev->gart.robj == NULL) { @@ -135,12 +211,19 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev) radeon_bo_unref(&rdev->gart.robj); } - - - /* * Common gart functions. */ +/** + * radeon_gart_unbind - unbind pages from the gart page table + * + * @rdev: radeon_device pointer + * @offset: offset into the GPU's gart aperture + * @pages: number of pages to unbind + * + * Unbinds the requested pages from the gart page table and + * replaces them with the dummy page (all asics). 
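The GART described above is, at heart, one flat page table indexed by aperture page number; unbinding points a slot back at the dummy page so stray accesses stay harmless. A toy model of that idea, assuming 4 KiB pages and treating bus addresses as plain integers:

#include <stdint.h>

#define EXAMPLE_PAGE_SHIFT 12			/* 4 KiB pages */

struct example_gart {
	uint64_t *page_table;			/* one entry per aperture page */
	uint64_t dummy_addr;			/* safe backing page for unbound slots */
};

static void example_gart_bind(struct example_gart *g, uint64_t aperture_offset,
			      uint64_t bus_addr)
{
	g->page_table[aperture_offset >> EXAMPLE_PAGE_SHIFT] = bus_addr;
}

static void example_gart_unbind(struct example_gart *g, uint64_t aperture_offset)
{
	g->page_table[aperture_offset >> EXAMPLE_PAGE_SHIFT] = g->dummy_addr;
}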
+ */ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, int pages) { @@ -172,6 +255,19 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, radeon_gart_tlb_flush(rdev); } +/** + * radeon_gart_bind - bind pages into the gart page table + * + * @rdev: radeon_device pointer + * @offset: offset into the GPU's gart aperture + * @pages: number of pages to bind + * @pagelist: pages to bind + * @dma_addr: DMA addresses of pages + * + * Binds the requested pages to the gart page table + * (all asics). + * Returns 0 for success, -EINVAL for failure. + */ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, int pages, struct page **pagelist, dma_addr_t *dma_addr) { @@ -203,6 +299,14 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, return 0; } +/** + * radeon_gart_restore - bind all pages in the gart page table + * + * @rdev: radeon_device pointer + * + * Binds all pages in the gart page table (all asics). + * Used to rebuild the gart table on device startup or resume. + */ void radeon_gart_restore(struct radeon_device *rdev) { int i, j, t; @@ -222,6 +326,14 @@ void radeon_gart_restore(struct radeon_device *rdev) radeon_gart_tlb_flush(rdev); } +/** + * radeon_gart_init - init the driver info for managing the gart + * + * @rdev: radeon_device pointer + * + * Allocate the dummy page and init the gart driver info (all asics). + * Returns 0 for success, error for failure. + */ int radeon_gart_init(struct radeon_device *rdev) { int r, i; @@ -262,6 +374,13 @@ int radeon_gart_init(struct radeon_device *rdev) return 0; } +/** + * radeon_gart_fini - tear down the driver info for managing the gart + * + * @rdev: radeon_device pointer + * + * Tear down the gart driver info and free the dummy page (all asics). + */ void radeon_gart_fini(struct radeon_device *rdev) { if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { -- cgit v1.2.3 From 09db86443230503f57d4079694a337f4e3c7b5a2 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:40 -0400 Subject: drm/radeon: document VM functions in radeon_gart.c (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Document the VM functions in radeon_gart.c v2: adjust per Christian's suggestions v3: adjust to Christians's latest changes Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/radeon_gart.c | 142 +++++++++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 12135ec94e8d..9727ea117223 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -396,12 +396,40 @@ void radeon_gart_fini(struct radeon_device *rdev) radeon_dummy_page_fini(rdev); } +/* + * GPUVM + * GPUVM is similar to the legacy gart on older asics, however + * rather than there being a single global gart table + * for the entire GPU, there are multiple VM page tables active + * at any given time. The VM page tables can contain a mix + * vram pages and system memory pages and system memory pages + * can be mapped as snooped (cached system pages) or unsnooped + * (uncached system pages). + * Each VM has an ID associated with it and there is a page table + * associated with each VMID. When execting a command buffer, + * the kernel tells the the ring what VMID to use for that command + * buffer. VMIDs are allocated dynamically as commands are submitted. 
+ * The userspace drivers maintain their own address space and the kernel + * sets up their pages tables accordingly when they submit their + * command buffers and a VMID is assigned. + * Cayman/Trinity support up to 8 active VMs at any given time; + * SI supports 16. + */ + /* * vm helpers * * TODO bind a default page at vm initialization for default address */ +/** + * radeon_vm_manager_init - init the vm manager + * + * @rdev: radeon_device pointer + * + * Init the vm manager (cayman+). + * Returns 0 for success, error for failure. + */ int radeon_vm_manager_init(struct radeon_device *rdev) { struct radeon_vm *vm; @@ -456,6 +484,16 @@ int radeon_vm_manager_init(struct radeon_device *rdev) } /* global mutex must be lock */ +/** + * radeon_vm_unbind_locked - unbind a specific vm + * + * @rdev: radeon_device pointer + * @vm: vm to unbind + * + * Unbind the requested vm (cayman+). + * Wait for use of the VM to finish, then unbind the page table, + * and free the page table memory. + */ static void radeon_vm_unbind_locked(struct radeon_device *rdev, struct radeon_vm *vm) { @@ -495,6 +533,13 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev, } } +/** + * radeon_vm_manager_fini - tear down the vm manager + * + * @rdev: radeon_device pointer + * + * Tear down the VM manager (cayman+). + */ void radeon_vm_manager_fini(struct radeon_device *rdev) { struct radeon_vm *vm, *tmp; @@ -516,6 +561,14 @@ void radeon_vm_manager_fini(struct radeon_device *rdev) } /* global mutex must be locked */ +/** + * radeon_vm_unbind - locked version of unbind + * + * @rdev: radeon_device pointer + * @vm: vm to unbind + * + * Locked version that wraps radeon_vm_unbind_locked (cayman+). + */ void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm) { mutex_lock(&vm->mutex); @@ -524,6 +577,18 @@ void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm) } /* global and local mutex must be locked */ +/** + * radeon_vm_bind - bind a page table to a VMID + * + * @rdev: radeon_device pointer + * @vm: vm to bind + * + * Bind the requested vm (cayman+). + * Suballocate memory for the page table, allocate a VMID + * and bind the page table to it, and finally start to populate + * the page table. + * Returns 0 for success, error for failure. + */ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm) { struct radeon_vm *vm_evict; @@ -586,6 +651,20 @@ retry_id: } /* object have to be reserved */ +/** + * radeon_vm_bo_add - add a bo to a specific vm + * + * @rdev: radeon_device pointer + * @vm: requested vm + * @bo: radeon buffer object + * @offset: requested offset of the buffer in the VM address space + * @flags: attributes of pages (read/write/valid/etc.) + * + * Add @bo into the requested vm (cayman+). + * Add @bo to the list of bos associated with the vm and validate + * the offset requested within the vm address space. + * Returns 0 for success, error for failure. + */ int radeon_vm_bo_add(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo, @@ -663,6 +742,17 @@ int radeon_vm_bo_add(struct radeon_device *rdev, return 0; } +/** + * radeon_vm_get_addr - get the physical address of the page + * + * @rdev: radeon_device pointer + * @mem: ttm mem + * @pfn: pfn + * + * Look up the physical address of the page that the pte resolves + * to (cayman+). + * Returns the physical address of the page. 
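The GPUVM description above amounts to one page table per VMID instead of a single global one, so each process sees its own GPU address space. A toy, single-level model of the lookup radeon_vm_get_addr() performs; the real tables live in GPU-accessible memory and can mix vram and system pages, per the overview. The 8-entry limit mirrors the cayman/trinity figure quoted above.

#include <stdint.h>

#define EXAMPLE_PAGE_SHIFT 12
#define EXAMPLE_MAX_VMIDS  8		/* cayman/trinity; SI would be 16 */

struct example_vm {
	uint64_t *page_table;		/* indexed by virtual page number */
};

static struct example_vm example_vms[EXAMPLE_MAX_VMIDS];

static uint64_t example_vm_translate(unsigned vmid, uint64_t gpu_va)
{
	uint64_t page = example_vms[vmid].page_table[gpu_va >> EXAMPLE_PAGE_SHIFT];

	return page + (gpu_va & ((1ull << EXAMPLE_PAGE_SHIFT) - 1));
}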
+ */ static u64 radeon_vm_get_addr(struct radeon_device *rdev, struct ttm_mem_reg *mem, unsigned pfn) @@ -692,6 +782,17 @@ static u64 radeon_vm_get_addr(struct radeon_device *rdev, } /* object have to be reserved & global and local mutex must be locked */ +/** + * radeon_vm_bo_update_pte - map a bo into the vm page table + * + * @rdev: radeon_device pointer + * @vm: requested vm + * @bo: radeon buffer object + * @mem: ttm mem + * + * Fill in the page table entries for @bo (cayman+). + * Returns 0 for success, -EINVAL for failure. + */ int radeon_vm_bo_update_pte(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo, @@ -740,6 +841,18 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, } /* object have to be reserved */ +/** + * radeon_vm_bo_rmv - remove a bo to a specific vm + * + * @rdev: radeon_device pointer + * @vm: requested vm + * @bo: radeon buffer object + * + * Remove @bo from the requested vm (cayman+). + * Remove @bo from the list of bos associated with the vm and + * remove the ptes for @bo in the page table. + * Returns 0 for success. + */ int radeon_vm_bo_rmv(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo) @@ -762,6 +875,15 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, return 0; } +/** + * radeon_vm_bo_invalidate - mark the bo as invalid + * + * @rdev: radeon_device pointer + * @vm: requested vm + * @bo: radeon buffer object + * + * Mark @bo as invalid (cayman+). + */ void radeon_vm_bo_invalidate(struct radeon_device *rdev, struct radeon_bo *bo) { @@ -773,6 +895,17 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev, } } +/** + * radeon_vm_init - initialize a vm instance + * + * @rdev: radeon_device pointer + * @vm: requested vm + * + * Init @vm (cayman+). + * Map the IB pool and any other shared objects into the VM + * by default as it's used by all VMs. + * Returns 0 for success, error for failure. + */ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) { int r; @@ -791,6 +924,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) return r; } +/** + * radeon_vm_init - tear down a vm instance + * + * @rdev: radeon_device pointer + * @vm: requested vm + * + * Tear down @vm (cayman+). + * Unbind the VM and remove all bos from the vm bo list + */ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) { struct radeon_bo_va *bo_va, *tmp; -- cgit v1.2.3 From 48ef779ff32561c2ed780808fcd0ab4633df22bd Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:41 -0400 Subject: drm/radeon: start to document the functions r100.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Still a lot more to do. Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/r100.c | 127 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 124 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 2e0a60328bd4..8acb34fd3fd5 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -65,6 +65,19 @@ MODULE_FIRMWARE(FIRMWARE_R520); #include "r100_track.h" +/* This files gather functions specifics to: + * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 + * and others in some cases. + */ + +/** + * r100_wait_for_vblank - vblank wait asic callback. + * + * @rdev: radeon_device pointer + * @crtc: crtc to wait for vblank on + * + * Wait for vblank on the requested crtc (r1xx-r4xx). 
+ */ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; @@ -99,22 +112,49 @@ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) } } -/* This files gather functions specifics to: - * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 +/** + * r100_pre_page_flip - pre-pageflip callback. + * + * @rdev: radeon_device pointer + * @crtc: crtc to prepare for pageflip on + * + * Pre-pageflip callback (r1xx-r4xx). + * Enables the pageflip irq (vblank irq). */ - void r100_pre_page_flip(struct radeon_device *rdev, int crtc) { /* enable the pflip int */ radeon_irq_kms_pflip_irq_get(rdev, crtc); } +/** + * r100_post_page_flip - pos-pageflip callback. + * + * @rdev: radeon_device pointer + * @crtc: crtc to cleanup pageflip on + * + * Post-pageflip callback (r1xx-r4xx). + * Disables the pageflip irq (vblank irq). + */ void r100_post_page_flip(struct radeon_device *rdev, int crtc) { /* disable the pflip int */ radeon_irq_kms_pflip_irq_put(rdev, crtc); } +/** + * r100_page_flip - pageflip callback. + * + * @rdev: radeon_device pointer + * @crtc_id: crtc to cleanup pageflip on + * @crtc_base: new address of the crtc (GPU MC address) + * + * Does the actual pageflip (r1xx-r4xx). + * During vblank we take the crtc lock and wait for the update_pending + * bit to go high, when it does, we release the lock, and allow the + * double buffered update to take place. + * Returns the current update pending status. + */ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; @@ -141,6 +181,15 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET; } +/** + * r100_pm_get_dynpm_state - look up dynpm power state callback. + * + * @rdev: radeon_device pointer + * + * Look up the optimal power state based on the + * current state of the GPU (r1xx-r5xx). + * Used for dynpm only. + */ void r100_pm_get_dynpm_state(struct radeon_device *rdev) { int i; @@ -223,6 +272,15 @@ void r100_pm_get_dynpm_state(struct radeon_device *rdev) pcie_lanes); } +/** + * r100_pm_init_profile - Initialize power profiles callback. + * + * @rdev: radeon_device pointer + * + * Initialize the power states used in profile mode + * (r1xx-r3xx). + * Used for profile mode only. + */ void r100_pm_init_profile(struct radeon_device *rdev) { /* default */ @@ -262,6 +320,14 @@ void r100_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; } +/** + * r100_pm_misc - set additional pm hw parameters callback. + * + * @rdev: radeon_device pointer + * + * Set non-clock parameters associated with a power state + * (voltage, pcie lanes, etc.) (r1xx-r4xx). + */ void r100_pm_misc(struct radeon_device *rdev) { int requested_index = rdev->pm.requested_power_state_index; @@ -353,6 +419,13 @@ void r100_pm_misc(struct radeon_device *rdev) } } +/** + * r100_pm_prepare - pre-power state change callback. + * + * @rdev: radeon_device pointer + * + * Prepare for a power state change (r1xx-r4xx). + */ void r100_pm_prepare(struct radeon_device *rdev) { struct drm_device *ddev = rdev->ddev; @@ -377,6 +450,13 @@ void r100_pm_prepare(struct radeon_device *rdev) } } +/** + * r100_pm_finish - post-power state change callback. + * + * @rdev: radeon_device pointer + * + * Clean up after a power state change (r1xx-r4xx). 
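The r100_page_flip() comment earlier in this patch describes waiting for the update_pending bit to go high during vblank before releasing the crtc lock. A generic bounded-poll sketch of that kind of wait; the status reader is a hypothetical callback, not a radeon register accessor.

#include <stdbool.h>
#include <stdint.h>

static bool example_wait_for_bit(uint32_t (*read_status)(void),
				 uint32_t bit, unsigned max_tries)
{
	unsigned i;

	for (i = 0; i < max_tries; i++) {
		if (read_status() & bit)
			return true;	/* bit went high within the budget */
	}
	return false;			/* gave up; the caller decides what to do next */
}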
+ */ void r100_pm_finish(struct radeon_device *rdev) { struct drm_device *ddev = rdev->ddev; @@ -401,6 +481,14 @@ void r100_pm_finish(struct radeon_device *rdev) } } +/** + * r100_gui_idle - gui idle callback. + * + * @rdev: radeon_device pointer + * + * Check of the GUI (2D/3D engines) are idle (r1xx-r5xx). + * Returns true if idle, false if not. + */ bool r100_gui_idle(struct radeon_device *rdev) { if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE) @@ -410,6 +498,15 @@ bool r100_gui_idle(struct radeon_device *rdev) } /* hpd for digital panel detect/disconnect */ +/** + * r100_hpd_sense - hpd sense callback. + * + * @rdev: radeon_device pointer + * @hpd: hpd (hotplug detect) pin + * + * Checks if a digital monitor is connected (r1xx-r4xx). + * Returns true if connected, false if not connected. + */ bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) { bool connected = false; @@ -429,6 +526,14 @@ bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) return connected; } +/** + * r100_hpd_set_polarity - hpd set polarity callback. + * + * @rdev: radeon_device pointer + * @hpd: hpd (hotplug detect) pin + * + * Set the polarity of the hpd pin (r1xx-r4xx). + */ void r100_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd) { @@ -457,6 +562,14 @@ void r100_hpd_set_polarity(struct radeon_device *rdev, } } +/** + * r100_hpd_init - hpd setup callback. + * + * @rdev: radeon_device pointer + * + * Setup the hpd pins used by the card (r1xx-r4xx). + * Set the polarity, and enable the hpd interrupts. + */ void r100_hpd_init(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; @@ -471,6 +584,14 @@ void r100_hpd_init(struct radeon_device *rdev) radeon_irq_kms_enable_hpd(rdev, enable); } +/** + * r100_hpd_fini - hpd tear down callback. + * + * @rdev: radeon_device pointer + * + * Tear down the hpd pins used by the card (r1xx-r4xx). + * Disable the hpd interrupts. + */ void r100_hpd_fini(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; -- cgit v1.2.3 From 377edc8bbd34f177d4ad7113ef70b76ed278fa94 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:42 -0400 Subject: drm/radeon: start to document evergreen.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Still a lot to do. Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/evergreen.c | 120 +++++++++++++++++++++++++++++++++++++ 1 file changed, 120 insertions(+) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 870009ad5f56..08293f1ba6aa 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -99,6 +99,14 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) } } +/** + * dce4_wait_for_vblank - vblank wait asic callback. + * + * @rdev: radeon_device pointer + * @crtc: crtc to wait for vblank on + * + * Wait for vblank on the requested crtc (evergreen+). + */ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; @@ -118,18 +126,49 @@ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc) } } +/** + * radeon_irq_kms_pflip_irq_get - pre-pageflip callback. + * + * @rdev: radeon_device pointer + * @crtc: crtc to prepare for pageflip on + * + * Pre-pageflip callback (evergreen+). + * Enables the pageflip irq (vblank irq). 
+ */ void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) { /* enable the pflip int */ radeon_irq_kms_pflip_irq_get(rdev, crtc); } +/** + * evergreen_post_page_flip - pos-pageflip callback. + * + * @rdev: radeon_device pointer + * @crtc: crtc to cleanup pageflip on + * + * Post-pageflip callback (evergreen+). + * Disables the pageflip irq (vblank irq). + */ void evergreen_post_page_flip(struct radeon_device *rdev, int crtc) { /* disable the pflip int */ radeon_irq_kms_pflip_irq_put(rdev, crtc); } +/** + * evergreen_page_flip - pageflip callback. + * + * @rdev: radeon_device pointer + * @crtc_id: crtc to cleanup pageflip on + * @crtc_base: new address of the crtc (GPU MC address) + * + * Does the actual pageflip (evergreen+). + * During vblank we take the crtc lock and wait for the update_pending + * bit to go high, when it does, we release the lock, and allow the + * double buffered update to take place. + * Returns the current update pending status. + */ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; @@ -214,6 +253,15 @@ int sumo_get_temp(struct radeon_device *rdev) return actual_temp * 1000; } +/** + * sumo_pm_init_profile - Initialize power profiles callback. + * + * @rdev: radeon_device pointer + * + * Initialize the power states used in profile mode + * (sumo, trinity, SI). + * Used for profile mode only. + */ void sumo_pm_init_profile(struct radeon_device *rdev) { int idx; @@ -265,6 +313,14 @@ void sumo_pm_init_profile(struct radeon_device *rdev) rdev->pm.power_state[idx].num_clock_modes - 1; } +/** + * evergreen_pm_misc - set additional pm hw parameters callback. + * + * @rdev: radeon_device pointer + * + * Set non-clock parameters associated with a power state + * (voltage, etc.) (evergreen+). + */ void evergreen_pm_misc(struct radeon_device *rdev) { int req_ps_idx = rdev->pm.requested_power_state_index; @@ -292,6 +348,13 @@ void evergreen_pm_misc(struct radeon_device *rdev) } } +/** + * evergreen_pm_prepare - pre-power state change callback. + * + * @rdev: radeon_device pointer + * + * Prepare for a power state change (evergreen+). + */ void evergreen_pm_prepare(struct radeon_device *rdev) { struct drm_device *ddev = rdev->ddev; @@ -310,6 +373,13 @@ void evergreen_pm_prepare(struct radeon_device *rdev) } } +/** + * evergreen_pm_finish - post-power state change callback. + * + * @rdev: radeon_device pointer + * + * Clean up after a power state change (evergreen+). + */ void evergreen_pm_finish(struct radeon_device *rdev) { struct drm_device *ddev = rdev->ddev; @@ -328,6 +398,15 @@ void evergreen_pm_finish(struct radeon_device *rdev) } } +/** + * evergreen_hpd_sense - hpd sense callback. + * + * @rdev: radeon_device pointer + * @hpd: hpd (hotplug detect) pin + * + * Checks if a digital monitor is connected (evergreen+). + * Returns true if connected, false if not connected. + */ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) { bool connected = false; @@ -364,6 +443,14 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) return connected; } +/** + * evergreen_hpd_set_polarity - hpd set polarity callback. + * + * @rdev: radeon_device pointer + * @hpd: hpd (hotplug detect) pin + * + * Set the polarity of the hpd pin (evergreen+). 
+ */ void evergreen_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd) { @@ -424,6 +511,14 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev, } } +/** + * evergreen_hpd_init - hpd setup callback. + * + * @rdev: radeon_device pointer + * + * Setup the hpd pins used by the card (evergreen+). + * Enable the pin, set the polarity, and enable the hpd interrupts. + */ void evergreen_hpd_init(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; @@ -462,6 +557,14 @@ void evergreen_hpd_init(struct radeon_device *rdev) radeon_irq_kms_enable_hpd(rdev, enabled); } +/** + * evergreen_hpd_fini - hpd tear down callback. + * + * @rdev: radeon_device pointer + * + * Tear down the hpd pins used by the card (evergreen+). + * Disable the hpd interrupts. + */ void evergreen_hpd_fini(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; @@ -925,6 +1028,14 @@ static void evergreen_program_watermarks(struct radeon_device *rdev, } +/** + * evergreen_bandwidth_update - update display watermarks callback. + * + * @rdev: radeon_device pointer + * + * Update the display watermarks based on the requested mode(s) + * (evergreen+). + */ void evergreen_bandwidth_update(struct radeon_device *rdev) { struct drm_display_mode *mode0 = NULL; @@ -948,6 +1059,15 @@ void evergreen_bandwidth_update(struct radeon_device *rdev) } } +/** + * evergreen_mc_wait_for_idle - wait for MC idle callback. + * + * @rdev: radeon_device pointer + * + * Wait for the MC (memory controller) to be idle. + * (evergreen+). + * Returns 0 if the MC is idle, -1 if not. + */ int evergreen_mc_wait_for_idle(struct radeon_device *rdev) { unsigned i; -- cgit v1.2.3 From 26fe45a0a76f165425f332a5aaa298f149f9db22 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:43 -0400 Subject: drm/radeon: fix up pll selection on DCE5/6 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Selecting ATOM_PPLL_INVALID should be equivalent as the DCPLL or PPLL0 are already programmed for the DISPCLK, but the preferred method is to always specify the PLL selected. SetPixelClock will check the parameters and skip the programming if the PLL is already set up. Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/atombios_crtc.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 3904d7964a4b..bbbeb98d4bb7 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1539,7 +1539,11 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) * crtc virtual pixel clock. */ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) { - if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk) + if (ASIC_IS_DCE5(rdev)) + return ATOM_DCPLL; + else if (ASIC_IS_DCE6(rdev)) + return ATOM_PPLL0; + else if (rdev->clock.dp_extclk) return ATOM_PPLL_INVALID; } } -- cgit v1.2.3 From f312f0937855c962edd455b250ed16ada818b234 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jul 2012 14:02:44 -0400 Subject: drm/radeon: fix SS setup for DCPLL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Need to actually set the SS parameters rather than just 0. 
Signed-off-by: Alex Deucher Reviewed-by: Christian König --- drivers/gpu/drm/radeon/atombios_crtc.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index bbbeb98d4bb7..a4664e015a6f 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -457,22 +457,18 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev, switch (pll_id) { case ATOM_PPLL1: args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; - args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); - args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); break; case ATOM_PPLL2: args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; - args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); - args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); break; case ATOM_DCPLL: args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; - args.v3.usSpreadSpectrumAmount = cpu_to_le16(0); - args.v3.usSpreadSpectrumStep = cpu_to_le16(0); break; case ATOM_PPLL_INVALID: return; } + args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); + args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); args.v3.ucEnable = enable; if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev)) args.v3.ucEnable = ATOM_DISABLE; @@ -482,22 +478,18 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev, switch (pll_id) { case ATOM_PPLL1: args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; - args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); - args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); break; case ATOM_PPLL2: args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; - args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); - args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); break; case ATOM_DCPLL: args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; - args.v2.usSpreadSpectrumAmount = cpu_to_le16(0); - args.v2.usSpreadSpectrumStep = cpu_to_le16(0); break; case ATOM_PPLL_INVALID: return; } + args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); + args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); args.v2.ucEnable = enable; if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev)) args.v2.ucEnable = ATOM_DISABLE; -- cgit v1.2.3 From 343065a605646b4c1fee8b1ca07b715283813b11 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 15 Jun 2012 11:01:22 +0200 Subject: drm/fb-helper: delay hotplug handling when partially bound Ok, this requires quite a dance to actually hit: 1) We plug in a 2nd screen, enable it in both X and (by vt-switching) in the fbcon. 2) We disable that screen again with xrandr. 3) We vt-switch again, so that fbcon displays on the 2nd screen, but X on the first screen. This obviously needs a driver that doesn't switch off unused functions when regaining the VT. 4) When X controls the vt, we unplug that screen. Now in drm_fb_helper_hotplug_event we notice that some crtcs are bound, but because we still have the fbcon on the 2nd screen we also have bound set. Which means the fbcon wrongly assumes it's in control of everything and happily disables the output on the 2nd screen, but enables its fb on the first screen. Work around this issue by counting how many crtcs are bound and how many are bound to fbcon and assuming that when fbcon isn't bound to all of them, it better not touch the output configuration.
Conceptually this is the same as only restoring the fbcon output configuration on the driver's ->lastclose, when we're sure that no one else is using kms. So this should be consistent with existing kms drivers. Chris has created a separate patch for the intel ddx, but I think we should fix this issue here regardless - the fbcon messing with the output config while it's not fully in control simply isn't very polite behaviour. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=50772 Tested-by: Maxim A. Nikulin Signed-Off-by: Daniel Vetter Reviewed-by: Chris Wilson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fb_helper.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 5683b7fdd746..d3f15b9d1d82 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1353,7 +1353,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) struct drm_device *dev = fb_helper->dev; int count = 0; u32 max_width, max_height, bpp_sel; - bool bound = false, crtcs_bound = false; + int bound = 0, crtcs_bound = 0; struct drm_crtc *crtc; if (!fb_helper->fb) @@ -1362,12 +1362,12 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) mutex_lock(&dev->mode_config.mutex); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (crtc->fb) - crtcs_bound = true; + crtcs_bound++; if (crtc->fb == fb_helper->fb) - bound = true; + bound++; } - if (!bound && crtcs_bound) { + if (bound < crtcs_bound) { fb_helper->delayed_hotplug = true; mutex_unlock(&dev->mode_config.mutex); return 0; -- cgit v1.2.3 From d3904754f2a67c503e262f938353cba491525320 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Jul 2012 16:28:07 +0200 Subject: drm/fb helper: don't call drm_crtc_helper_set_config Go through the interface vtable instead, because not everyone might be using the crtc helper code. Cc: dri-devel@lists.freedesktop.org Signed-Off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fb_helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index d3f15b9d1d82..f546d1e8af82 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -228,7 +228,7 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper) int i, ret; for (i = 0; i < fb_helper->crtc_count; i++) { struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; - ret = drm_crtc_helper_set_config(mode_set); + ret = mode_set->crtc->funcs->set_config(mode_set); if (ret) error = true; } -- cgit v1.2.3 From 59fd415ded4aa9ac2f80ca3ac36e3a014c7e552e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Jul 2012 16:28:08 +0200 Subject: drm: remove the list_head from drm_mode_set It's unused. And it confused me quite a bit until I discovered that. Cc: dri-devel@lists.freedesktop.org Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- include/drm/drm_crtc.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index bac55c215113..a1a0386e0160 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -676,8 +676,6 @@ struct drm_plane { * This is used to set modes.
*/ struct drm_mode_set { - struct list_head head; - struct drm_framebuffer *fb; struct drm_crtc *crtc; struct drm_display_mode *mode; -- cgit v1.2.3 From e811f5ae19043b2ac2c28e147a4274038e655598 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 17 Jul 2012 17:56:50 +0200 Subject: drm: Make the .mode_fixup() operations mode argument a const pointer The passed mode must not be modified by the operation, make it const. Signed-off-by: Laurent Pinchart Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/ast/ast_mode.c | 6 +++--- drivers/gpu/drm/cirrus/cirrus_mode.c | 6 +++--- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_drv.h | 2 +- drivers/gpu/drm/exynos/exynos_drm_encoder.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 2 +- drivers/gpu/drm/exynos/exynos_hdmi.c | 2 +- drivers/gpu/drm/gma500/cdv_intel_crt.c | 2 +- drivers/gpu/drm/gma500/cdv_intel_display.c | 2 +- drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 2 +- drivers/gpu/drm/gma500/cdv_intel_lvds.c | 2 +- drivers/gpu/drm/gma500/mdfld_dsi_dpi.c | 2 +- drivers/gpu/drm/gma500/mdfld_dsi_dpi.h | 2 +- drivers/gpu/drm/gma500/mdfld_intel_display.c | 2 +- drivers/gpu/drm/gma500/oaktrail_crtc.c | 2 +- drivers/gpu/drm/gma500/oaktrail_hdmi.c | 2 +- drivers/gpu/drm/gma500/psb_intel_display.c | 2 +- drivers/gpu/drm/gma500/psb_intel_drv.h | 2 +- drivers/gpu/drm/gma500/psb_intel_lvds.c | 2 +- drivers/gpu/drm/gma500/psb_intel_sdvo.c | 6 +++--- drivers/gpu/drm/i2c/ch7006_drv.c | 2 +- drivers/gpu/drm/i2c/ch7006_mode.c | 2 +- drivers/gpu/drm/i2c/ch7006_priv.h | 2 +- drivers/gpu/drm/i2c/sil164_drv.c | 2 +- drivers/gpu/drm/i915/dvo.h | 2 +- drivers/gpu/drm/i915/intel_crt.c | 2 +- drivers/gpu/drm/i915/intel_dp.c | 3 ++- drivers/gpu/drm/i915/intel_dvo.c | 2 +- drivers/gpu/drm/i915/intel_hdmi.c | 2 +- drivers/gpu/drm/i915/intel_lvds.c | 2 +- drivers/gpu/drm/i915/intel_sdvo.c | 6 +++--- drivers/gpu/drm/i915/intel_tv.c | 3 ++- drivers/gpu/drm/mgag200/mgag200_mode.c | 8 ++++---- drivers/gpu/drm/nouveau/nv04_crtc.c | 2 +- drivers/gpu/drm/nouveau/nv04_dac.c | 2 +- drivers/gpu/drm/nouveau/nv04_dfp.c | 2 +- drivers/gpu/drm/nouveau/nv17_tv.c | 2 +- drivers/gpu/drm/nouveau/nv50_crtc.c | 2 +- drivers/gpu/drm/nouveau/nv50_dac.c | 3 ++- drivers/gpu/drm/nouveau/nv50_sor.c | 3 ++- drivers/gpu/drm/nouveau/nvd0_display.c | 8 +++++--- drivers/gpu/drm/radeon/atombios_crtc.c | 2 +- drivers/gpu/drm/radeon/atombios_dp.c | 2 +- drivers/gpu/drm/radeon/atombios_encoders.c | 4 ++-- drivers/gpu/drm/radeon/radeon_display.c | 4 ++-- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 2 +- drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 2 +- drivers/gpu/drm/radeon/radeon_mode.h | 4 ++-- drivers/gpu/drm/udl/udl_encoder.c | 2 +- drivers/gpu/drm/udl/udl_modeset.c | 2 +- drivers/staging/omapdrm/omap_crtc.c | 2 +- drivers/staging/omapdrm/omap_encoder.c | 2 +- include/drm/drm_crtc_helper.h | 4 ++-- include/drm/drm_encoder_slave.h | 2 +- 55 files changed, 78 insertions(+), 72 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 65f9d231af14..7282c081fb53 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -460,8 +460,8 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) } static bool ast_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { return true; } @@ -680,7 
+680,7 @@ static void ast_encoder_dpms(struct drm_encoder *encoder, int mode) } static bool ast_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index 100f6308c509..a44d31aa4e3c 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -97,7 +97,7 @@ static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode) * to just pass that straight through, so this does nothing */ static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; @@ -429,8 +429,8 @@ void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { return true; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 4afb625128d7..32a34c85899b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -237,7 +237,7 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc) static bool exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { DRM_DEBUG_KMS("%s\n", __FILE__); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index c82c90c443e7..277653d5fda0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -174,7 +174,7 @@ struct exynos_drm_manager_ops { void (*apply)(struct device *subdrv_dev); void (*mode_fixup)(struct device *subdrv_dev, struct drm_connector *connector, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); void (*mode_set)(struct device *subdrv_dev, void *mode); void (*get_max_resol)(struct device *subdrv_dev, unsigned int *width, diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c index 23d5ad379f86..4a13a747f5d4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c @@ -108,7 +108,7 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode) static bool exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c index 5d9d2c2f8f3f..8ffcdf8b9e22 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c @@ -142,7 +142,7 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev) static void drm_hdmi_mode_fixup(struct device *subdrv_dev, struct drm_connector *connector, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_hdmi_context *ctx = to_context(subdrv_dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h index 
bd8126996e52..a91c42088e42 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h @@ -51,7 +51,7 @@ struct exynos_hdmi_ops { /* manager */ void (*mode_fixup)(void *ctx, struct drm_connector *connector, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); void (*mode_set)(void *ctx, void *mode); void (*get_max_resol)(void *ctx, unsigned int *width, diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index a137e9e39a33..066bde3f19c4 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1940,7 +1940,7 @@ static void hdmi_conf_apply(struct hdmi_context *hdata) } static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_display_mode *m; diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index 187422018601..8c175345d85c 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -82,7 +82,7 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector, } static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index c3e9a0f701df..a68509ba22a8 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -913,7 +913,7 @@ static void cdv_intel_crtc_commit(struct drm_crtc *crtc) } static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index 88b59d4a7b7f..a86f87b9ddde 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -90,7 +90,7 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder, } static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index ff5b58eb878c..c7f9468b74ba 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -270,7 +270,7 @@ static int cdv_intel_lvds_mode_valid(struct drm_connector *connector, } static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c index b34ff097b979..d4813e03f5ee 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c @@ -684,7 +684,7 @@ void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode) } bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct mdfld_dsi_encoder *dsi_encoder = 
mdfld_dsi_encoder(encoder); diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h index 6f762478b959..2b40663e1696 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h +++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h @@ -65,7 +65,7 @@ extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, /* MDFLD DPI helper functions */ extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode); extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder); extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c index 3f3cd619c79f..dec6a9aea3c6 100644 --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -117,7 +117,7 @@ static void psb_intel_crtc_commit(struct drm_crtc *crtc) } static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index f821c835ca90..cdafd2acc72f 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -487,7 +487,7 @@ oaktrail_crtc_mode_set_exit: } static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index c10899c953b9..2eb3dc4e9c9b 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -191,7 +191,7 @@ static int oaktrail_hdmi_mode_valid(struct drm_connector *connector, } static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 36c3c99612f6..30dc22a7156c 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -543,7 +543,7 @@ void psb_intel_encoder_destroy(struct drm_encoder *encoder) } static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 2515f83248cb..ebe1a28f60e1 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -268,7 +268,7 @@ extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device *mode_cmd, void *mm_private); extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); extern int psb_intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode); diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index c83f5b5d1057..37adc9edf974 100644 --- 
a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -375,7 +375,7 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector, } bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index d39b15be7649..0466c7b985f8 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -901,7 +901,7 @@ static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo) static bool psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct psb_intel_sdvo_dtd output_dtd; @@ -918,7 +918,7 @@ psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdv static bool psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* Reset the input timing to the screen. Assume always input 0. */ @@ -942,7 +942,7 @@ psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo, } static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder); diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c index d3f2e8785010..36d952280c50 100644 --- a/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/drivers/gpu/drm/i2c/ch7006_drv.c @@ -88,7 +88,7 @@ static void ch7006_encoder_restore(struct drm_encoder *encoder) } static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct ch7006_priv *priv = to_ch7006_priv(encoder); diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c index c860f24a5afc..9b83574141a6 100644 --- a/drivers/gpu/drm/i2c/ch7006_mode.c +++ b/drivers/gpu/drm/i2c/ch7006_mode.c @@ -172,7 +172,7 @@ struct ch7006_mode ch7006_modes[] = { }; struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder, - struct drm_display_mode *drm_mode) + const struct drm_display_mode *drm_mode) { struct ch7006_priv *priv = to_ch7006_priv(encoder); struct ch7006_mode *mode; diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h index 17667b7d57e7..09599f4c0c9a 100644 --- a/drivers/gpu/drm/i2c/ch7006_priv.h +++ b/drivers/gpu/drm/i2c/ch7006_priv.h @@ -111,7 +111,7 @@ extern struct ch7006_tv_norm_info ch7006_tv_norms[]; extern struct ch7006_mode ch7006_modes[]; struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder, - struct drm_display_mode *drm_mode); + const struct drm_display_mode *drm_mode); void ch7006_setup_levels(struct drm_encoder *encoder); void ch7006_setup_subcarrier(struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c index b7d45ab4ba69..30b8ae5e5c4a 100644 --- a/drivers/gpu/drm/i2c/sil164_drv.c +++ b/drivers/gpu/drm/i2c/sil164_drv.c @@ -254,7 +254,7 @@ sil164_encoder_restore(struct drm_encoder *encoder) static bool sil164_encoder_mode_fixup(struct 
drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index 8c2ad014c47f..58914691a77b 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h @@ -86,7 +86,7 @@ struct intel_dvo_dev_ops { * buses with clock limitations. */ bool (*mode_fixup)(struct intel_dvo_device *dvo, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); /* diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 61d55d3141c7..48e3b76e0ab2 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -132,7 +132,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector, } static bool intel_crt_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 95817c4edbe9..2bc1505132c3 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -703,7 +703,8 @@ intel_dp_i2c_init(struct intel_dp *intel_dp, } static bool -intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, +intel_dp_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 60ba50b956f2..36c542e5036b 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -136,7 +136,7 @@ static int intel_dvo_mode_valid(struct drm_connector *connector, } static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 5b2c88ca6edb..98f602427eb8 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -685,7 +685,7 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector, } static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 9b706a540d70..49f09a8b05e9 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -229,7 +229,7 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target) } static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 2f5106a488c5..26a6a4d0d078 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -935,7 +935,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) static bool intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct intel_sdvo_dtd output_dtd; 
@@ -954,7 +954,7 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, * Unfortunately we have to set up the full output mode to do that. */ static bool intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct intel_sdvo_dtd input_dtd; @@ -979,7 +979,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo, } static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 3b413c9042c0..befce6c49704 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -891,7 +891,8 @@ intel_tv_mode_valid(struct drm_connector *connector, static bool -intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, +intel_tv_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index d303061b251e..a4d7c500c97b 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -78,8 +78,8 @@ static inline void mga_wait_busy(struct mga_device *mdev) * to just pass that straight through, so this does nothing */ static bool mga_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { return true; } @@ -1322,8 +1322,8 @@ void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, * to handle any encoder-specific limitations */ static bool mga_encoder_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { return true; } diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index 4c31c63e5528..43accc11102f 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -215,7 +215,7 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode) } static bool -nv_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, +nv_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index 8300266ffaea..38f19479417c 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -332,7 +332,7 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) } static bool nv04_dac_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { if (nv04_dac_in_use(encoder)) diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index 2258746016f8..c2675623b7cd 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -179,7 +179,7 @@ static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder) } static bool 
nv04_dfp_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 696d7e7dc2a0..67be5db021f5 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c @@ -338,7 +338,7 @@ static int nv17_tv_mode_valid(struct drm_encoder *encoder, } static bool nv17_tv_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 97a477b3d52d..22cebd5dd694 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -527,7 +527,7 @@ nv50_crtc_commit(struct drm_crtc *crtc) } static bool -nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, +nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c index eb216a446b89..2c36a6b92c53 100644 --- a/drivers/gpu/drm/nouveau/nv50_dac.c +++ b/drivers/gpu/drm/nouveau/nv50_dac.c @@ -175,7 +175,8 @@ nv50_dac_restore(struct drm_encoder *encoder) } static bool -nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, +nv50_dac_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index a9514eaa74c1..93240bde891b 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c @@ -327,7 +327,8 @@ nv50_sor_restore(struct drm_encoder *encoder) } static bool -nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, +nv50_sor_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c index c486d3ce3c2c..d0d60e1e7f95 100644 --- a/drivers/gpu/drm/nouveau/nvd0_display.c +++ b/drivers/gpu/drm/nouveau/nvd0_display.c @@ -607,7 +607,7 @@ nvd0_crtc_commit(struct drm_crtc *crtc) } static bool -nvd0_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, +nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; @@ -938,7 +938,8 @@ nvd0_dac_dpms(struct drm_encoder *encoder, int mode) } static bool -nvd0_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, +nvd0_dac_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); @@ -1377,7 +1378,8 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode) } static bool -nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, +nvd0_sor_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct 
nouveau_encoder *nv_encoder = nouveau_encoder(encoder); diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a4664e015a6f..9e6f76fec527 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1624,7 +1624,7 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, } static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 5131b3b0f7d2..0355536f61e4 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -608,7 +608,7 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, } void radeon_dp_set_link_config(struct drm_connector *connector, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector; diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 486ccdf4aacd..7dfc62fae6a6 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -58,7 +58,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) } static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); @@ -2234,7 +2234,7 @@ radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode) } static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 64a008d14493..7ddef8f30d0e 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1401,7 +1401,7 @@ void radeon_modeset_fini(struct radeon_device *rdev) radeon_i2c_fini(rdev); } -static bool is_hdtv_mode(struct drm_display_mode *mode) +static bool is_hdtv_mode(const struct drm_display_mode *mode) { /* try and guess if this is a tv or a monitor */ if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */ @@ -1414,7 +1414,7 @@ static bool is_hdtv_mode(struct drm_display_mode *mode) } bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 210317c7045e..d5fd615897ec 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -990,7 +990,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) } static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c 
index a0c82229e8f0..670e9910f869 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -244,7 +244,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, } static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 5b10ffd7bb2f..f380d59c5763 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -488,7 +488,7 @@ extern void radeon_connector_hotplug(struct drm_connector *connector); extern int radeon_dp_mode_valid_helper(struct drm_connector *connector, struct drm_display_mode *mode); extern void radeon_dp_set_link_config(struct drm_connector *connector, - struct drm_display_mode *mode); + const struct drm_display_mode *mode); extern void radeon_dp_link_train(struct drm_encoder *encoder, struct drm_connector *connector); extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); @@ -678,7 +678,7 @@ void radeon_enc_destroy(struct drm_encoder *encoder); void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); void radeon_combios_asic_init(struct drm_device *dev); bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); void radeon_panel_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode); diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c index 56e75f0f1df5..0731ab2e6c06 100644 --- a/drivers/gpu/drm/udl/udl_encoder.c +++ b/drivers/gpu/drm/udl/udl_encoder.c @@ -27,7 +27,7 @@ static void udl_encoder_disable(struct drm_encoder *encoder) } static bool udl_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index 0d7816789da1..ac2d717714ce 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -251,7 +251,7 @@ static void udl_crtc_dpms(struct drm_crtc *crtc, int mode) } static bool udl_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c index 8b864afb40b6..62e0022561bc 100644 --- a/drivers/staging/omapdrm/omap_crtc.c +++ b/drivers/staging/omapdrm/omap_crtc.c @@ -60,7 +60,7 @@ static void omap_crtc_dpms(struct drm_crtc *crtc, int mode) } static bool omap_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; diff --git a/drivers/staging/omapdrm/omap_encoder.c b/drivers/staging/omapdrm/omap_encoder.c index 06c52cb62d2f..31c735d39217 100644 --- a/drivers/staging/omapdrm/omap_encoder.c +++ b/drivers/staging/omapdrm/omap_encoder.c @@ -48,7 +48,7 @@ static void omap_encoder_dpms(struct drm_encoder *encoder, int mode) } static bool omap_encoder_mode_fixup(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct 
drm_display_mode *adjusted_mode) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 7988e55c98d0..e01cc80c9c30 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -62,7 +62,7 @@ struct drm_crtc_helper_funcs { /* Provider can fixup or change mode timings before modeset occurs */ bool (*mode_fixup)(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); /* Actually set the mode */ int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, @@ -96,7 +96,7 @@ struct drm_encoder_helper_funcs { void (*restore)(struct drm_encoder *encoder); bool (*mode_fixup)(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); void (*prepare)(struct drm_encoder *encoder); void (*commit)(struct drm_encoder *encoder); diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h index 2f65633d28a7..7dc385233805 100644 --- a/include/drm/drm_encoder_slave.h +++ b/include/drm/drm_encoder_slave.h @@ -54,7 +54,7 @@ struct drm_encoder_slave_funcs { void (*save)(struct drm_encoder *encoder); void (*restore)(struct drm_encoder *encoder); bool (*mode_fixup)(struct drm_encoder *encoder, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); int (*mode_valid)(struct drm_encoder *encoder, struct drm_display_mode *mode); -- cgit v1.2.3 From f60ec4c7df043df81e62891ac45383d012afe0da Mon Sep 17 00:00:00 2001 From: Michel Dänzer Date: Tue, 17 Jul 2012 19:02:09 +0200 Subject: drm/radeon: Try harder to avoid HW cursor ending on a multiple of 128 columns. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This could previously fail if either of the enabled displays was using a horizontal resolution that is a multiple of 128, and only the leftmost column of the cursor was (supposed to be) visible at the right edge of that display. The solution is to move the cursor one pixel to the left in that case. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=33183 Cc: stable@vger.kernel.org Signed-off-by: Michel Dänzer Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_cursor.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 42acc6449dd6..711e95ad39bf 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -262,8 +262,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, if (!(cursor_end & 0x7f)) w--; } - if (w <= 0) + if (w <= 0) { w = 1; + cursor_end = x - xorigin + w; + if (!(cursor_end & 0x7f)) { + x--; + WARN_ON_ONCE(x < 0); + } + } } } -- cgit v1.2.3 From 4dbdf0aea9585add9359e7ca5535256f866ec85c Mon Sep 17 00:00:00 2001 From: Devendra Naga Date: Sat, 7 Jul 2012 09:22:15 +0000 Subject: drm/mgag200: fix null pointer dereference we are referencing the pointer after doing alloc_apertures, as alloc_apertures kzallocs, the kzalloc may fail and we get a NULL. 
so we need to check for NULL before we dereference this pointer Signed-off-by: Devendra Naga Signed-off-by: Dave Airlie --- drivers/gpu/drm/mgag200/mgag200_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 93e832d6c328..ea1024d79974 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -47,6 +47,9 @@ static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev) bool primary = false; ap = alloc_apertures(1); + if (!ap) + return; + ap->ranges[0].base = pci_resource_start(pdev, 0); ap->ranges[0].size = pci_resource_len(pdev, 0); -- cgit v1.2.3 From 440a7cd87e2bea9b8cf7b8b07aecad9c2f8638dd Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Wed, 27 Jun 2012 12:25:01 -0400 Subject: drm/radeon: improve GPU lockup debugging info on r6xx/r7xx/r8xx/r9xx Print various CP register that have valuable informations regarding GPU lockup. Signed-off-by: Jerome Glisse Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 16 ++++++++++++++++ drivers/gpu/drm/radeon/evergreend.h | 4 ++++ drivers/gpu/drm/radeon/ni.c | 16 ++++++++++++++++ drivers/gpu/drm/radeon/nid.h | 4 ++++ drivers/gpu/drm/radeon/r600.c | 16 ++++++++++++++++ drivers/gpu/drm/radeon/r600d.h | 3 +++ 6 files changed, 59 insertions(+) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 08293f1ba6aa..14098ace1569 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2316,6 +2316,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev) RREG32(GRBM_STATUS_SE1)); dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", RREG32(SRBM_STATUS)); + dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", + RREG32(CP_STALLED_STAT1)); + dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", + RREG32(CP_STALLED_STAT2)); + dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", + RREG32(CP_BUSY_STAT)); + dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", + RREG32(CP_STAT)); evergreen_mc_stop(rdev, &save); if (evergreen_mc_wait_for_idle(rdev)) { dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); @@ -2353,6 +2361,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev) RREG32(GRBM_STATUS_SE1)); dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", RREG32(SRBM_STATUS)); + dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", + RREG32(CP_STALLED_STAT1)); + dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", + RREG32(CP_STALLED_STAT2)); + dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", + RREG32(CP_BUSY_STAT)); + dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", + RREG32(CP_STAT)); evergreen_mc_resume(rdev, &save); return 0; } diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index b50b15c70498..d3bd098e4e19 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -88,6 +88,10 @@ #define CONFIG_MEMSIZE 0x5428 #define CP_COHER_BASE 0x85F8 +#define CP_STALLED_STAT1 0x8674 +#define CP_STALLED_STAT2 0x8678 +#define CP_BUSY_STAT 0x867C +#define CP_STAT 0x8680 #define CP_ME_CNTL 0x86D8 #define CP_ME_HALT (1 << 28) #define CP_PFP_HALT (1 << 26) diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index ddfef8cdd838..9945d86d9001 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1122,6 +1122,14 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev) 
RREG32(GRBM_STATUS_SE1)); dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", RREG32(SRBM_STATUS)); + dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", + RREG32(CP_STALLED_STAT1)); + dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", + RREG32(CP_STALLED_STAT2)); + dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", + RREG32(CP_BUSY_STAT)); + dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", + RREG32(CP_STAT)); dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n", RREG32(0x14F8)); dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n", @@ -1170,6 +1178,14 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev) RREG32(GRBM_STATUS_SE1)); dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", RREG32(SRBM_STATUS)); + dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", + RREG32(CP_STALLED_STAT1)); + dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", + RREG32(CP_STALLED_STAT2)); + dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", + RREG32(CP_BUSY_STAT)); + dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", + RREG32(CP_STAT)); evergreen_mc_resume(rdev, &save); return 0; } diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index a0b98066e207..870db340d377 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -236,6 +236,10 @@ #define CP_SEM_WAIT_TIMER 0x85BC #define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8 #define CP_COHER_CNTL2 0x85E8 +#define CP_STALLED_STAT1 0x8674 +#define CP_STALLED_STAT2 0x8678 +#define CP_BUSY_STAT 0x867C +#define CP_STAT 0x8680 #define CP_ME_CNTL 0x86D8 #define CP_ME_HALT (1 << 28) #define CP_PFP_HALT (1 << 26) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c5b2e9069362..e2dee80e27dc 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1289,6 +1289,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) RREG32(R_008014_GRBM_STATUS2)); dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", RREG32(R_000E50_SRBM_STATUS)); + dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", + RREG32(CP_STALLED_STAT1)); + dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", + RREG32(CP_STALLED_STAT2)); + dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", + RREG32(CP_BUSY_STAT)); + dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", + RREG32(CP_STAT)); rv515_mc_stop(rdev, &save); if (r600_mc_wait_for_idle(rdev)) { dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); @@ -1332,6 +1340,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) RREG32(R_008014_GRBM_STATUS2)); dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", RREG32(R_000E50_SRBM_STATUS)); + dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", + RREG32(CP_STALLED_STAT1)); + dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", + RREG32(CP_STALLED_STAT2)); + dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", + RREG32(CP_BUSY_STAT)); + dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", + RREG32(CP_STAT)); rv515_mc_resume(rdev, &save); return 0; } diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 025fd5b6c08c..4b116ae75fc2 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -153,6 +153,9 @@ #define CONFIG_MEMSIZE 0x5428 #define CONFIG_CNTL 0x5424 +#define CP_STALLED_STAT1 0x8674 +#define CP_STALLED_STAT2 0x8678 +#define CP_BUSY_STAT 0x867C #define CP_STAT 0x8680 #define CP_COHER_BASE 0x85F8 #define CP_DEBUG 0xC1FC -- cgit v1.2.3 From 
cdcac9cd7741af2c2b9255cbf060f772596907bb Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 27 Jun 2012 08:35:52 +0100 Subject: pci_regs: define LNKSTA2 pcie cap + bits. We need these for detecting the max link speed for drm drivers. Acked-by: Bjorn Helgaas Signed-off-by: Dave Airlie --- include/linux/pci_regs.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index 4b608f543412..7f04132eb02d 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h @@ -521,6 +521,11 @@ #define PCI_EXP_OBFF_MSGA_EN 0x2000 /* OBFF enable with Message type A */ #define PCI_EXP_OBFF_MSGB_EN 0x4000 /* OBFF enable with Message type B */ #define PCI_EXP_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */ +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ +#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x01 /* Current Link Speed 2.5GT/s */ +#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x02 /* Current Link Speed 5.0GT/s */ +#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x04 /* Current Link Speed 8.0GT/s */ +#define PCI_EXP_LNKCAP2_CROSSLINK 0x100 /* Crosslink supported */ #define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ #define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */ -- cgit v1.2.3 From f42977841f4a28b82820384fdb9b9581b410dbb1 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 27 Jun 2012 08:35:53 +0100 Subject: drm/pci: add support for getting the supported link bw. This should work for PCIE3.0 as well. Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_pci.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++ include/drm/drmP.h | 5 +++++ 2 files changed, 54 insertions(+) diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 13f3d936472f..5320364582ce 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -465,3 +465,52 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) DRM_INFO("Module unloaded\n"); } EXPORT_SYMBOL(drm_pci_exit); + +int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask) +{ + struct pci_dev *root; + int pos; + u32 lnkcap, lnkcap2; + + *mask = 0; + if (!dev->pdev) + return -EINVAL; + + if (!pci_is_pcie(dev->pdev)) + return -EINVAL; + + root = dev->pdev->bus->self; + + pos = pci_pcie_cap(root); + if (!pos) + return -EINVAL; + + /* we've been informed via and serverworks don't make the cut */ + if (root->vendor == PCI_VENDOR_ID_VIA || + root->vendor == PCI_VENDOR_ID_SERVERWORKS) + return -EINVAL; + + pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap); + pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2); + + lnkcap &= PCI_EXP_LNKCAP_SLS; + lnkcap2 &= 0xfe; + + if (lnkcap2) { /* PCIE GEN 3.0 */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + *mask |= DRM_PCIE_SPEED_25; + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + *mask |= DRM_PCIE_SPEED_50; + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + *mask |= DRM_PCIE_SPEED_80; + } else { + if (lnkcap & 1) + *mask |= DRM_PCIE_SPEED_25; + if (lnkcap & 2) + *mask |= DRM_PCIE_SPEED_50; + } + + DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2); + return 0; +} +EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 31ad880ca2ef..e4e3be3b9464 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1761,6 +1761,11 @@ extern int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver); +#define DRM_PCIE_SPEED_25 1 +#define DRM_PCIE_SPEED_50 2 +#define DRM_PCIE_SPEED_80 4 + +extern int 
drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask); /* platform section */ extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); -- cgit v1.2.3 From 197bbb3d464f33eac1b458e83c1929d2f268d4c9 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 27 Jun 2012 08:35:54 +0100 Subject: drm/radeon/kms: auto detect pcie link speed from root port This check the root ports supported link speeds and enables GEN2 mode if the 5.0 GT link speed is available. The first 3.0 cards are SI so they will probably need more investigation. Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 12 +++++++++++- drivers/gpu/drm/radeon/r600.c | 11 +++++++++++ drivers/gpu/drm/radeon/radeon_drv.c | 4 ++-- drivers/gpu/drm/radeon/rv770.c | 11 +++++++++++ 4 files changed, 35 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 14098ace1569..e585a3b947eb 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -3421,7 +3421,8 @@ void evergreen_fini(struct radeon_device *rdev) void evergreen_pcie_gen2_enable(struct radeon_device *rdev) { - u32 link_width_cntl, speed_cntl; + u32 link_width_cntl, speed_cntl, mask; + int ret; if (radeon_pcie_gen2 == 0) return; @@ -3436,6 +3437,15 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev) if (ASIC_IS_X2(rdev)) return; + ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); + if (ret != 0) + return; + + if (!(mask & DRM_PCIE_SPEED_50)) + return; + + DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) || (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index e2dee80e27dc..637280f541a3 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3681,6 +3681,8 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev) { u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; u16 link_cntl2; + u32 mask; + int ret; if (radeon_pcie_gen2 == 0) return; @@ -3699,6 +3701,15 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev) if (rdev->family <= CHIP_R600) return; + ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); + if (ret != 0) + return; + + if (!(mask & DRM_PCIE_SPEED_50)) + return; + + DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); + /* 55 nm r6xx asics */ if ((rdev->family == CHIP_RV670) || (rdev->family == CHIP_RV620) || diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 2c4d53fd20c5..042fcfff3bc4 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -133,7 +133,7 @@ int radeon_tv = 1; int radeon_audio = 0; int radeon_disp_priority = 0; int radeon_hw_i2c = 0; -int radeon_pcie_gen2 = 0; +int radeon_pcie_gen2 = -1; int radeon_msi = -1; int radeon_lockup_timeout = 10000; @@ -179,7 +179,7 @@ module_param_named(disp_priority, radeon_disp_priority, int, 0444); MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); -MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)"); +MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)"); module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444); MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)"); 
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index eb4704e72bdb..ca8ffec10ff6 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1112,6 +1112,8 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev) { u32 link_width_cntl, lanes, speed_cntl, tmp; u16 link_cntl2; + u32 mask; + int ret; if (radeon_pcie_gen2 == 0) return; @@ -1126,6 +1128,15 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev) if (ASIC_IS_X2(rdev)) return; + ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); + if (ret != 0) + return; + + if (!(mask & DRM_PCIE_SPEED_50)) + return; + + DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); + /* advertise upconfig capability */ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); link_width_cntl &= ~LC_UPCONFIGURE_DIS; -- cgit v1.2.3 From 5bd42f69fbedfc12cd8161323a9ffab0b2586a2a Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 26 Jun 2012 14:53:07 +0100 Subject: drm/udl: port over blanking code from udlfb. This ports over the dpms code from udlfb, and should mean a better chance of turning on some udl devices. Signed-off-by: Dave Airlie --- drivers/gpu/drm/udl/udl_modeset.c | 44 ++++++++++++++++++++++++++++++++------- 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index ac2d717714ce..f5dd89e891de 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -45,12 +45,25 @@ static char *udl_vidreg_unlock(char *buf) * 0x01 H and V sync off (screen blank but powered) * 0x07 DPMS powerdown (requires modeset to come back) */ -static char *udl_enable_hvsync(char *buf, bool enable) +static char *udl_set_blank(char *buf, int dpms_mode) { - if (enable) - return udl_set_register(buf, 0x1F, 0x00); - else - return udl_set_register(buf, 0x1F, 0x07); + u8 reg; + switch (dpms_mode) { + case DRM_MODE_DPMS_OFF: + reg = 0x07; + break; + case DRM_MODE_DPMS_STANDBY: + reg = 0x05; + break; + case DRM_MODE_DPMS_SUSPEND: + reg = 0x01; + break; + case DRM_MODE_DPMS_ON: + reg = 0x00; + break; + } + + return udl_set_register(buf, 0x1f, reg); } static char *udl_set_color_depth(char *buf, u8 selection) @@ -199,6 +212,20 @@ static char *udl_set_vid_cmds(char *wrptr, struct drm_display_mode *mode) return wrptr; } +static char *udl_dummy_render(char *wrptr) +{ + *wrptr++ = 0xAF; + *wrptr++ = 0x6A; /* copy */ + *wrptr++ = 0x00; /* from addr */ + *wrptr++ = 0x00; + *wrptr++ = 0x00; + *wrptr++ = 0x01; /* one pixel */ + *wrptr++ = 0x00; /* to address */ + *wrptr++ = 0x00; + *wrptr++ = 0x00; + return wrptr; +} + static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -235,9 +262,10 @@ static void udl_crtc_dpms(struct drm_crtc *crtc, int mode) buf = (char *)urb->transfer_buffer; buf = udl_vidreg_lock(buf); - buf = udl_enable_hvsync(buf, false); + buf = udl_set_blank(buf, mode); buf = udl_vidreg_unlock(buf); + buf = udl_dummy_render(buf); retval = udl_submit_urb(dev, urb, buf - (char *) urb->transfer_buffer); } else { @@ -306,9 +334,11 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc, wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay); wrptr = udl_set_vid_cmds(wrptr, adjusted_mode); - wrptr = udl_enable_hvsync(wrptr, true); + wrptr = udl_set_blank(wrptr, DRM_MODE_DPMS_ON); wrptr = udl_vidreg_unlock(wrptr); + wrptr = udl_dummy_render(wrptr); + ufb->active_16 = true; if (old_fb) { struct udl_framebuffer *uold_fb = 
to_udl_fb(old_fb); -- cgit v1.2.3 From 834859c3abf9272bf55bd0d0c95e5a892f24dadc Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 25 Oct 2011 23:37:09 +0200 Subject: drm/via: clean up reclaim_buffers A few things - kill reclaim_buffers, it's never ever called because via does not set DRIVER_HAVE_DMA - inline the idlelock dance into the buffer reclaim logic and make it a simple preclose cleanup function - directly call the the dma_quiescent function and kill the needless if check. v2: Actually drop the idlelock when we take it. Reported by James Simmons. v3: Rebased onto latest drm-next. v4: Fixup the refactor. v5: More fixup the refactor - I've accidentally changed the check for any master to checking whether the closing fd is the master. v6: Don't forget to drop the idlelock in the early return path, too. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/via/via_drv.c | 4 +--- drivers/gpu/drm/via/via_mm.c | 13 +++++++++++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index 02661f35f7a0..e927b4c052f5 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c @@ -75,6 +75,7 @@ static struct drm_driver driver = { .load = via_driver_load, .unload = via_driver_unload, .open = via_driver_open, + .preclose = via_reclaim_buffers_locked, .postclose = via_driver_postclose, .context_dtor = via_final_context, .get_vblank_counter = via_get_vblank_counter, @@ -85,9 +86,6 @@ static struct drm_driver driver = { .irq_uninstall = via_driver_irq_uninstall, .irq_handler = via_driver_irq_handler, .dma_quiescent = via_driver_dma_quiescent, - .reclaim_buffers = drm_core_reclaim_buffers, - .reclaim_buffers_locked = NULL, - .reclaim_buffers_idlelocked = via_reclaim_buffers_locked, .lastclose = via_lastclose, .ioctls = via_ioctls, .fops = &via_driver_fops, diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c index a3574d09a07d..acfcb358e7b7 100644 --- a/drivers/gpu/drm/via/via_mm.c +++ b/drivers/gpu/drm/via/via_mm.c @@ -215,14 +215,20 @@ void via_reclaim_buffers_locked(struct drm_device *dev, struct via_file_private *file_priv = file->driver_priv; struct via_memblock *entry, *next; + if (!(file->minor->master && file->master->lock.hw_lock)) + return; + + drm_idlelock_take(&file->master->lock); + mutex_lock(&dev->struct_mutex); if (list_empty(&file_priv->obj_list)) { mutex_unlock(&dev->struct_mutex); + drm_idlelock_release(&file->master->lock); + return; } - if (dev->driver->dma_quiescent) - dev->driver->dma_quiescent(dev); + via_driver_dma_quiescent(dev); list_for_each_entry_safe(entry, next, &file_priv->obj_list, owner_list) { @@ -231,5 +237,8 @@ void via_reclaim_buffers_locked(struct drm_device *dev, kfree(entry); } mutex_unlock(&dev->struct_mutex); + + drm_idlelock_release(&file->master->lock); + return; } -- cgit v1.2.3 From ea5e437406701838751ebc0f797d8b1c31df8121 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 25 Oct 2011 23:42:59 +0200 Subject: drm/sis: clean up reclaim_buffers Like for via. v2: Actually drop the idlelock again if taken. v3: Fixup. v4: Fixup the "has master" vs. "is master" confusion the refactor introduced. v5: Drop the idlelock in the early return path. 
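For readers skimming the series, the preclose-time reclaim shape that via and sis now share looks roughly like the condensed sketch below (placeholder foo_* names, not the literal driver code; the driver-specific pieces are the quiescent call and the per-file block list):

    void foo_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file)
    {
            struct foo_file_private *file_priv = file->driver_priv;

            /* Master (and hence the hw lock) already gone: nothing to idle against. */
            if (!(file->minor->master && file->master->lock.hw_lock))
                    return;

            /* Take the idlelock ourselves instead of relying on the drm core to do it. */
            drm_idlelock_take(&file->master->lock);

            mutex_lock(&dev->struct_mutex);
            if (list_empty(&file_priv->obj_list)) {
                    mutex_unlock(&dev->struct_mutex);
                    drm_idlelock_release(&file->master->lock);
                    return;
            }

            /* Call the driver's own quiescent function directly. */
            foo_idle(dev);

            /* ... walk file_priv->obj_list and free each entry ... */

            mutex_unlock(&dev->struct_mutex);
            drm_idlelock_release(&file->master->lock);
    }
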
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/sis/sis_drv.c | 3 +-- drivers/gpu/drm/sis/sis_mm.c | 13 +++++++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index dd14cd1a0033..7f119870147c 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -105,10 +105,9 @@ static struct drm_driver driver = { .load = sis_driver_load, .unload = sis_driver_unload, .open = sis_driver_open, + .preclose = sis_reclaim_buffers_locked, .postclose = sis_driver_postclose, .dma_quiescent = sis_idle, - .reclaim_buffers = NULL, - .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked, .lastclose = sis_lastclose, .ioctls = sis_ioctls, .fops = &sis_driver_fops, diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c index dd4a316c3d74..5acc396ef93c 100644 --- a/drivers/gpu/drm/sis/sis_mm.c +++ b/drivers/gpu/drm/sis/sis_mm.c @@ -321,14 +321,20 @@ void sis_reclaim_buffers_locked(struct drm_device *dev, struct sis_file_private *file_priv = file->driver_priv; struct sis_memblock *entry, *next; + if (!(file->minor->master && file->master->lock.hw_lock)) + return; + + drm_idlelock_take(&file->master->lock); + mutex_lock(&dev->struct_mutex); if (list_empty(&file_priv->obj_list)) { mutex_unlock(&dev->struct_mutex); + drm_idlelock_release(&file->master->lock); + return; } - if (dev->driver->dma_quiescent) - dev->driver->dma_quiescent(dev); + sis_idle(dev); list_for_each_entry_safe(entry, next, &file_priv->obj_list, @@ -343,6 +349,9 @@ void sis_reclaim_buffers_locked(struct drm_device *dev, kfree(entry); } mutex_unlock(&dev->struct_mutex); + + drm_idlelock_release(&file->master->lock); + return; } -- cgit v1.2.3 From 3ae6b64400cc92530bcab73d13c6e1b7a5cfd915 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 25 Oct 2011 23:42:29 +0200 Subject: drm: kill reclaim_buffers_idlelocked functions The only two users are now folded into the drivers preclose functions, so this is unused. 
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 8 -------- include/drm/drmP.h | 2 -- 2 files changed, 10 deletions(-) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 123de28f94ef..f6231f0203e8 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -417,14 +417,6 @@ static void drm_master_release(struct drm_device *dev, struct file *filp) file_priv->master->lock.hw_lock) drm_reclaim_locked_buffers(dev, filp); - if (dev->driver->reclaim_buffers_idlelocked && - file_priv->master->lock.hw_lock) { - drm_idlelock_take(&file_priv->master->lock); - dev->driver->reclaim_buffers_idlelocked(dev, file_priv); - drm_idlelock_release(&file_priv->master->lock); - } - - if (drm_i_have_hw_lock(dev, file_priv)) { DRM_DEBUG("File %p released, freeing lock for context %d\n", filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index e4e3be3b9464..5f2211c9130d 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -880,8 +880,6 @@ struct drm_driver { struct drm_file * file_priv); void (*reclaim_buffers_locked) (struct drm_device *dev, struct drm_file *file_priv); - void (*reclaim_buffers_idlelocked) (struct drm_device *dev, - struct drm_file *file_priv); void (*set_version) (struct drm_device *dev, struct drm_set_version *sv); -- cgit v1.2.3 From d5346b3727bad3a033cb0a5b45a4a2da45119ec3 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 11 Jun 2012 21:50:23 +0200 Subject: Revert "Revert "drm/i810: cleanup reclaim_buffers"" This reverts commit 6e877b576ddf7cde5db2e9a6dcb56fef0ea77e64, reinstating the original commit: commit 87499ffdcb1c70f66988cd8febc4ead0ba2f9118 Author: Daniel Vetter Date: Tue Oct 25 23:51:24 2011 +0200 drm/i810: cleanup reclaim_buffers My dear old i815 always hits the deadlocked on reclaim_buffers warning. Switch over to the idlelock duct-tape on hope that works better. I've fired up my i815 and now closing glxgears doesn't take 5 seconds anymore. \o/ The original problem with that was that I've moved it ahead in the series so that it could be included despite some patches not being ready quite yet. The little problem is that this patch required some of the previous rework to work correctly. Now that everything is in the right order again, this actually works on my i810 and does speed up closing gl apps as the original commit claimed. Without hanging the machine, as the revert says. 
Signed-Off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/i810/i810_dma.c | 17 +++++++++++------ drivers/gpu/drm/i810/i810_drv.c | 1 - drivers/gpu/drm/i810/i810_drv.h | 6 ++---- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index fa9439159ebd..57d892eaaa6e 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -881,7 +881,7 @@ static int i810_flush_queue(struct drm_device *dev) } /* Must be called with the lock held */ -static void i810_reclaim_buffers(struct drm_device *dev, +void i810_driver_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; @@ -1220,12 +1220,17 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) if (dev_priv->page_flipping) i810_do_cleanup_pageflip(dev); } -} -void i810_driver_reclaim_buffers_locked(struct drm_device *dev, - struct drm_file *file_priv) -{ - i810_reclaim_buffers(dev, file_priv); + if (file_priv->master && file_priv->master->lock.hw_lock) { + drm_idlelock_take(&file_priv->master->lock); + i810_driver_reclaim_buffers(dev, file_priv); + drm_idlelock_release(&file_priv->master->lock); + } else { + /* master disappeared, clean up stuff anyway and hope nothing + * goes wrong */ + i810_driver_reclaim_buffers(dev, file_priv); + } + } int i810_driver_dma_quiescent(struct drm_device *dev) diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index ec12f7dc717a..053f1ee58393 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -63,7 +63,6 @@ static struct drm_driver driver = { .lastclose = i810_driver_lastclose, .preclose = i810_driver_preclose, .device_is_agp = i810_driver_device_is_agp, - .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked, .dma_quiescent = i810_driver_dma_quiescent, .ioctls = i810_ioctls, .fops = &i810_driver_fops, diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h index c9339f481795..6e0acad9e0f5 100644 --- a/drivers/gpu/drm/i810/i810_drv.h +++ b/drivers/gpu/drm/i810/i810_drv.h @@ -116,14 +116,12 @@ typedef struct drm_i810_private { /* i810_dma.c */ extern int i810_driver_dma_quiescent(struct drm_device *dev); -extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev, - struct drm_file *file_priv); +void i810_driver_reclaim_buffers(struct drm_device *dev, + struct drm_file *file_priv); extern int i810_driver_load(struct drm_device *, unsigned long flags); extern void i810_driver_lastclose(struct drm_device *dev); extern void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv); -extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev, - struct drm_file *file_priv); extern int i810_driver_device_is_agp(struct drm_device *dev); extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -- cgit v1.2.3 From 923d1fe86b4a98292bee8f08f386eb3eb4c7927e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 25 Oct 2011 23:57:28 +0200 Subject: drm: kill reclaim_buffers_locked i810 was the last user of this code, with that gone, kill it. 
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 46 +------------------------------------ drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1 - include/drm/drmP.h | 2 -- 3 files changed, 1 insertion(+), 48 deletions(-) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index f6231f0203e8..6cf9369e8cd5 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -370,53 +370,10 @@ int drm_fasync(int fd, struct file *filp, int on) } EXPORT_SYMBOL(drm_fasync); -/* - * Reclaim locked buffers; note that this may be a bad idea if the current - * context doesn't have the hw lock... - */ -static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f) -{ - struct drm_file *file_priv = f->private_data; - - if (drm_i_have_hw_lock(dev, file_priv)) { - dev->driver->reclaim_buffers_locked(dev, file_priv); - } else { - unsigned long _end = jiffies + 3 * DRM_HZ; - int locked = 0; - - drm_idlelock_take(&file_priv->master->lock); - - /* - * Wait for a while. - */ - do { - spin_lock_bh(&file_priv->master->lock.spinlock); - locked = file_priv->master->lock.idle_has_lock; - spin_unlock_bh(&file_priv->master->lock.spinlock); - if (locked) - break; - schedule(); - } while (!time_after_eq(jiffies, _end)); - - if (!locked) { - DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n" - "\tdriver to use reclaim_buffers_idlelocked() instead.\n" - "\tI will go on reclaiming the buffers anyway.\n"); - } - - dev->driver->reclaim_buffers_locked(dev, file_priv); - drm_idlelock_release(&file_priv->master->lock); - } -} - static void drm_master_release(struct drm_device *dev, struct file *filp) { struct drm_file *file_priv = filp->private_data; - if (dev->driver->reclaim_buffers_locked && - file_priv->master->lock.hw_lock) - drm_reclaim_locked_buffers(dev, filp); - if (drm_i_have_hw_lock(dev, file_priv)) { DRM_DEBUG("File %p released, freeing lock for context %d\n", filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); @@ -424,8 +381,7 @@ static void drm_master_release(struct drm_device *dev, struct file *filp) _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); } - if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && - !dev->driver->reclaim_buffers_locked) { + if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) { dev->driver->reclaim_buffers(dev, file_priv); } } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index ee24d216aa85..5d5632f5265b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1147,7 +1147,6 @@ static struct drm_driver driver = { .get_vblank_counter = vmw_get_vblank_counter, .enable_vblank = vmw_enable_vblank, .disable_vblank = vmw_disable_vblank, - .reclaim_buffers_locked = NULL, .ioctls = vmw_ioctls, .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), .dma_quiescent = NULL, /*vmw_dma_quiescent, */ diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 5f2211c9130d..259a7619c464 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -878,8 +878,6 @@ struct drm_driver { void (*irq_uninstall) (struct drm_device *dev); void (*reclaim_buffers) (struct drm_device *dev, struct drm_file * file_priv); - void (*reclaim_buffers_locked) (struct drm_device *dev, - struct drm_file *file_priv); void (*set_version) (struct drm_device *dev, struct drm_set_version *sv); -- cgit v1.2.3 From e2b3c5b64b35af35b0715936f88212c457394c2f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 26 Oct 2011 00:14:15 +0200 Subject: drm/savage: 
clean up reclaim_buffers The reclaim_buffers function of the savage driver actually wants to run with the hw_lock held - at least there are printks in the call-chain to that effect. But the drm core only calls reclaim_buffers as used by savage _after_ forcefully dropping the hwlock (in case it's still hold by the closing fd). So do the same idlelock dance as for the other dma drivers and hope that papers over any issues. v2: Don't let the idlelock linger around. Signed-off-by: Daniel Vetter Tested-by: Tormod Volden Signed-off-by: Dave Airlie --- drivers/gpu/drm/savage/savage_bci.c | 9 +++++++-- drivers/gpu/drm/savage/savage_drv.c | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index 6eb507a5d130..1efbb9075837 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c @@ -1050,6 +1050,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; drm_savage_private_t *dev_priv = dev->dev_private; + int release_idlelock = 0; int i; if (!dma) @@ -1059,7 +1060,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) if (!dma->buflist) return; - /*i830_flush_queue(dev); */ + if (file_priv->master && file_priv->master->lock.hw_lock) { + drm_idlelock_take(&file_priv->master->lock); + release_idlelock = 1; + } for (i = 0; i < dma->buf_count; i++) { struct drm_buf *buf = dma->buflist[i]; @@ -1075,7 +1079,8 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) } } - drm_core_reclaim_buffers(dev, file_priv); + if (release_idlelock) + drm_idlelock_release(&file_priv->master->lock); } struct drm_ioctl_desc savage_ioctls[] = { diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index 89afe0b83643..d31d4cca9a4c 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c @@ -52,9 +52,9 @@ static struct drm_driver driver = { .dev_priv_size = sizeof(drm_savage_buf_priv_t), .load = savage_driver_load, .firstopen = savage_driver_firstopen, + .preclose = savage_reclaim_buffers, .lastclose = savage_driver_lastclose, .unload = savage_driver_unload, - .reclaim_buffers = savage_reclaim_buffers, .ioctls = savage_ioctls, .dma_ioctl = savage_bci_buffers, .fops = &savage_driver_fops, -- cgit v1.2.3 From b0071efe827f68cf173e1a8868b70618e9aca7d7 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 26 Oct 2011 00:20:57 +0200 Subject: drm: kill reclaim_buffers callback All leftover users either haven't set DRIVER_HAVE_DMA, in which case this will never be called, or use the drm_core implementation. Call that directly in the only callsite. 
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 5 ++--- drivers/gpu/drm/gma500/psb_drv.c | 1 - drivers/gpu/drm/i915/i915_drv.c | 1 - drivers/gpu/drm/mga/mga_drv.c | 1 - drivers/gpu/drm/nouveau/nouveau_drv.c | 1 - drivers/gpu/drm/r128/r128_drv.c | 1 - drivers/gpu/drm/radeon/radeon_drv.c | 2 -- drivers/gpu/drm/tdfx/tdfx_drv.c | 1 - include/drm/drmP.h | 2 -- 9 files changed, 2 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 6cf9369e8cd5..96b8c8f9c028 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -381,9 +381,8 @@ static void drm_master_release(struct drm_device *dev, struct file *filp) _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); } - if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) { - dev->driver->reclaim_buffers(dev, file_priv); - } + if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) + drm_core_reclaim_buffers(dev, file_priv); } static void drm_events_release(struct drm_file *file_priv) diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index caba6e08693c..cd1dd1b14c76 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -632,7 +632,6 @@ static struct drm_driver driver = { .open = psb_driver_open, .preclose = psb_driver_preclose, .postclose = psb_driver_close, - .reclaim_buffers = drm_core_reclaim_buffers, .gem_init_object = psb_gem_init_object, .gem_free_object = psb_gem_free_object, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index e754cdfaec79..ed22612bc847 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -940,7 +940,6 @@ static struct drm_driver driver = { .resume = i915_resume, .device_is_agp = i915_driver_device_is_agp, - .reclaim_buffers = drm_core_reclaim_buffers, .master_create = i915_master_create, .master_destroy = i915_master_destroy, #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index f9a925d58819..b1bb46de3f5a 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -75,7 +75,6 @@ static struct drm_driver driver = { .irq_postinstall = mga_driver_irq_postinstall, .irq_uninstall = mga_driver_irq_uninstall, .irq_handler = mga_driver_irq_handler, - .reclaim_buffers = drm_core_reclaim_buffers, .ioctls = mga_ioctls, .dma_ioctl = mga_dma_buffers, .fops = &mga_driver_fops, diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index cad254c8e387..b4d1b4afcac5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -422,7 +422,6 @@ static struct drm_driver driver = { .get_vblank_counter = drm_vblank_count, .enable_vblank = nouveau_vblank_enable, .disable_vblank = nouveau_vblank_disable, - .reclaim_buffers = drm_core_reclaim_buffers, .ioctls = nouveau_ioctls, .fops = &nouveau_driver_fops, diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index 88718fad5d6d..2666a5308ab9 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -71,7 +71,6 @@ static struct drm_driver driver = { .irq_postinstall = r128_driver_irq_postinstall, .irq_uninstall = r128_driver_irq_uninstall, .irq_handler = r128_driver_irq_handler, - .reclaim_buffers = drm_core_reclaim_buffers, .ioctls = r128_ioctls, .dma_ioctl = r128_cce_buffers, .fops = &r128_driver_fops, diff --git a/drivers/gpu/drm/radeon/radeon_drv.c 
b/drivers/gpu/drm/radeon/radeon_drv.c index 042fcfff3bc4..dcea6f01ae4e 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -262,7 +262,6 @@ static struct drm_driver driver_old = { .irq_postinstall = radeon_driver_irq_postinstall, .irq_uninstall = radeon_driver_irq_uninstall, .irq_handler = radeon_driver_irq_handler, - .reclaim_buffers = drm_core_reclaim_buffers, .ioctls = radeon_ioctls, .dma_ioctl = radeon_cp_buffers, .fops = &radeon_driver_old_fops, @@ -365,7 +364,6 @@ static struct drm_driver kms_driver = { .irq_postinstall = radeon_driver_irq_postinstall_kms, .irq_uninstall = radeon_driver_irq_uninstall_kms, .irq_handler = radeon_driver_irq_handler_kms, - .reclaim_buffers = drm_core_reclaim_buffers, .ioctls = radeon_ioctls_kms, .gem_init_object = radeon_gem_object_init, .gem_free_object = radeon_gem_object_free, diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index 1613c78544c0..90f6b13acfac 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -54,7 +54,6 @@ static const struct file_operations tdfx_driver_fops = { static struct drm_driver driver = { .driver_features = DRIVER_USE_MTRR, - .reclaim_buffers = drm_core_reclaim_buffers, .fops = &tdfx_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 259a7619c464..022db5a768aa 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -876,8 +876,6 @@ struct drm_driver { void (*irq_preinstall) (struct drm_device *dev); int (*irq_postinstall) (struct drm_device *dev); void (*irq_uninstall) (struct drm_device *dev); - void (*reclaim_buffers) (struct drm_device *dev, - struct drm_file * file_priv); void (*set_version) (struct drm_device *dev, struct drm_set_version *sv); -- cgit v1.2.3 From 4c373790a4d4d667d1ab38b1fe2bbf6a8322e93b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 26 Oct 2011 00:53:57 +0200 Subject: drm: ditch strange DRIVER_DMA_QUEUE only error bail-out Only one driver (i810) even sets that flag. Now the actual locking code uncoditionally promotes lock->context to an unsigned int. Closer inspection of the userspace reveals that the drm lock context is defined as an unsigned int (at least on linux). I suspect we just have a strange case of signedness confusion going on. Tested on my i815, doesn't seem to break anything. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_lock.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 521152041691..32039553e172 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -70,10 +70,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) lock->context, task_pid_nr(current), master->lock.hw_lock->lock, lock->flags); - if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) - if (lock->context < 0) - return -EINVAL; - add_wait_queue(&master->lock.lock_queue, &entry); spin_lock_bh(&master->lock.spinlock); master->lock.user_waiters++; -- cgit v1.2.3 From a344a7e7c27776950a70ce4b829a9ac15a212e65 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 26 Oct 2011 00:54:41 +0200 Subject: drm: kill dma queue support Absolutely unused. All the values are only ever initialized and then used at most in some debug printout functions. 
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_bufs.c | 16 ++-------------- drivers/gpu/drm/drm_debugfs.c | 1 - drivers/gpu/drm/drm_dma.c | 5 ----- drivers/gpu/drm/drm_drv.c | 11 ----------- drivers/gpu/drm/drm_fops.c | 4 ---- drivers/gpu/drm/drm_info.c | 36 ------------------------------------ drivers/gpu/drm/drm_proc.c | 1 - drivers/gpu/drm/i810/i810_drv.c | 2 +- include/drm/drmP.h | 8 +------- 9 files changed, 4 insertions(+), 80 deletions(-) diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 348b367debeb..b356c719f2f1 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -641,8 +641,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request) if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; - if (dev->queue_count) - return -EBUSY; /* Not while in use */ /* Make sure buffers are located in AGP memory that we own */ valid = 0; @@ -704,7 +702,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request) buf->next = NULL; buf->waiting = 0; buf->pending = 0; - init_waitqueue_head(&buf->dma_wait); buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; @@ -796,13 +793,11 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) order = drm_order(request->size); size = 1 << order; - DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n", - request->count, request->size, size, order, dev->queue_count); + DRM_DEBUG("count=%d, size=%d (%d), order=%d\n", + request->count, request->size, size, order); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; - if (dev->queue_count) - return -EBUSY; /* Not while in use */ alignment = (request->flags & _DRM_PAGE_ALIGN) ? 
PAGE_ALIGN(size) : size; @@ -904,7 +899,6 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) buf->next = NULL; buf->waiting = 0; buf->pending = 0; - init_waitqueue_head(&buf->dma_wait); buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; @@ -1019,8 +1013,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; - if (dev->queue_count) - return -EBUSY; /* Not while in use */ spin_lock(&dev->count_lock); if (dev->buf_use) { @@ -1071,7 +1063,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request buf->next = NULL; buf->waiting = 0; buf->pending = 0; - init_waitqueue_head(&buf->dma_wait); buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; @@ -1177,8 +1168,6 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; - if (dev->queue_count) - return -EBUSY; /* Not while in use */ spin_lock(&dev->count_lock); if (dev->buf_use) { @@ -1228,7 +1217,6 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request buf->next = NULL; buf->waiting = 0; buf->pending = 0; - init_waitqueue_head(&buf->dma_wait); buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 1c7a1c0d3edd..70b13fc19396 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -46,7 +46,6 @@ static struct drm_info_list drm_debugfs_list[] = { {"name", drm_name_info, 0}, {"vm", drm_vm_info, 0}, {"clients", drm_clients_info, 0}, - {"queues", drm_queues_info, 0}, {"bufs", drm_bufs_info, 0}, {"gem_names", drm_gem_name_info, DRIVER_GEM}, #if DRM_DEBUG_CODE diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c index cfb4e333ec0f..08f5e5309b22 100644 --- a/drivers/gpu/drm/drm_dma.c +++ b/drivers/gpu/drm/drm_dma.c @@ -120,11 +120,6 @@ void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf) buf->pending = 0; buf->file_priv = NULL; buf->used = 0; - - if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) - && waitqueue_active(&buf->dma_wait)) { - wake_up_interruptible(&buf->dma_wait); - } } /** diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 946bd91c57ec..9238de4009fa 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -182,7 +182,6 @@ static struct drm_ioctl_desc drm_ioctls[] = { int drm_lastclose(struct drm_device * dev) { struct drm_vma_entry *vma, *vma_temp; - int i; DRM_DEBUG("\n"); @@ -228,16 +227,6 @@ int drm_lastclose(struct drm_device * dev) kfree(vma); } - if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { - for (i = 0; i < dev->queue_count; i++) { - kfree(dev->queuelist[i]); - dev->queuelist[i] = NULL; - } - kfree(dev->queuelist); - dev->queuelist = NULL; - } - dev->queue_count = 0; - if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !drm_core_check_feature(dev, DRIVER_MODESET)) drm_dma_takedown(dev); diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 96b8c8f9c028..c6f5f8951482 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -75,10 +75,6 @@ static int drm_setup(struct drm_device * dev) dev->sigdata.lock = NULL; - dev->queue_count = 0; - dev->queue_reserved = 0; - dev->queue_slots = 0; - dev->queuelist = NULL; dev->context_flag = 0; dev->interrupt_flag = 0; dev->dma_flag = 0; 
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 4076a33e5cad..8928edbb94c7 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -109,42 +109,6 @@ int drm_vm_info(struct seq_file *m, void *data) return 0; } -/** - * Called when "/proc/dri/.../queues" is read. - */ -int drm_queues_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - int i; - struct drm_queue *q; - - mutex_lock(&dev->struct_mutex); - seq_printf(m, " ctx/flags use fin" - " blk/rw/rwf wait flushed queued" - " locks\n\n"); - for (i = 0; i < dev->queue_count; i++) { - q = dev->queuelist[i]; - atomic_inc(&q->use_count); - seq_printf(m, "%5d/0x%03x %5d %5d" - " %5d/%c%c/%c%c%c %5Zd\n", - i, - q->flags, - atomic_read(&q->use_count), - atomic_read(&q->finalization), - atomic_read(&q->block_count), - atomic_read(&q->block_read) ? 'r' : '-', - atomic_read(&q->block_write) ? 'w' : '-', - waitqueue_active(&q->read_queue) ? 'r' : '-', - waitqueue_active(&q->write_queue) ? 'w' : '-', - waitqueue_active(&q->flush_queue) ? 'f' : '-', - DRM_BUFCOUNT(&q->waitlist)); - atomic_dec(&q->use_count); - } - mutex_unlock(&dev->struct_mutex); - return 0; -} - /** * Called when "/proc/dri/.../bufs" is read. */ diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c index fff87221f9e9..371c695322d9 100644 --- a/drivers/gpu/drm/drm_proc.c +++ b/drivers/gpu/drm/drm_proc.c @@ -53,7 +53,6 @@ static struct drm_info_list drm_proc_list[] = { {"name", drm_name_info, 0}, {"vm", drm_vm_info, 0}, {"clients", drm_clients_info, 0}, - {"queues", drm_queues_info, 0}, {"bufs", drm_bufs_info, 0}, {"gem_names", drm_gem_name_info, DRIVER_GEM}, #if DRM_DEBUG_CODE diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index 053f1ee58393..f9924ad04d09 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -57,7 +57,7 @@ static const struct file_operations i810_driver_fops = { static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | - DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE, + DRIVER_HAVE_DMA, .dev_priv_size = sizeof(drm_i810_buf_priv_t), .load = i810_driver_load, .lastclose = i810_driver_lastclose, diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 022db5a768aa..d6b67bb9075f 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -348,7 +348,6 @@ struct drm_buf { struct drm_buf *next; /**< Kernel-only: used for free list */ __volatile__ int waiting; /**< On kernel DMA queue */ __volatile__ int pending; /**< On hardware DMA queue */ - wait_queue_head_t dma_wait; /**< Processes waiting */ struct drm_file *file_priv; /**< Private of holding file descr */ int context; /**< Kernel queue for this buffer */ int while_locked; /**< Dispatch this buffer while locked */ @@ -1102,12 +1101,8 @@ struct drm_device { /*@} */ - /** \name DMA queues (contexts) */ + /** \name DMA support */ /*@{ */ - int queue_count; /**< Number of active DMA queues */ - int queue_reserved; /**< Number of reserved DMA queues */ - int queue_slots; /**< Actual length of queuelist */ - struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */ struct drm_device_dma *dma; /**< Optional pointer for DMA support */ /*@} */ @@ -1534,7 +1529,6 @@ extern int drm_debugfs_cleanup(struct drm_minor *minor); /* Info file support */ extern int drm_name_info(struct seq_file *m, void *data); extern int drm_vm_info(struct seq_file *m, void 
*data); -extern int drm_queues_info(struct seq_file *m, void *data); extern int drm_bufs_info(struct seq_file *m, void *data); extern int drm_vblank_info(struct seq_file *m, void *data); extern int drm_clients_info(struct seq_file *m, void* data); -- cgit v1.2.3 From 67cb4b4dd4b3bb38626a841200638a4e953ea3fd Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 26 Oct 2011 00:31:26 +0200 Subject: drm: unconditionally clean up dma buffers of closing clients With the last patch to ditch DMA_QUEUE support, we should be able to call the dma cleanup uncoditionally, even when the master has disappeared. Do so because it just makes more sense. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index c6f5f8951482..d25a61739a7b 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -376,9 +376,6 @@ static void drm_master_release(struct drm_device *dev, struct file *filp) drm_lock_free(&file_priv->master->lock, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); } - - if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - drm_core_reclaim_buffers(dev, file_priv); } static void drm_events_release(struct drm_file *file_priv) @@ -448,6 +445,9 @@ int drm_release(struct inode *inode, struct file *filp) if (file_priv->minor->master) drm_master_release(dev, filp); + if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) + drm_core_reclaim_buffers(dev, file_priv); + drm_events_release(file_priv); if (dev->driver->driver_features & DRIVER_MODESET) -- cgit v1.2.3 From 26587e69946249dc8327c5912d86320c3f63b2c5 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 26 Oct 2011 01:03:05 +0200 Subject: drm: kill i915/i830 ids from drm_pciids.h Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- include/drm/drm_pciids.h | 42 ------------------------------------------ 1 file changed, 42 deletions(-) diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index a7aec391b7b7..7ff5c99b1638 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -686,14 +686,6 @@ {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ {0, 0, 0} -#define i830_PCI_IDS \ - {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0x8086, 0x358e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ - {0, 0, 0} - #define gamma_PCI_IDS \ {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ {0, 0, 0} @@ -726,37 +718,3 @@ #define ffb_PCI_IDS \ {0, 0, 0} - -#define i915_PCI_IDS \ - {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x27ae, PCI_ANY_ID, 
PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x2e42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0x8086, 0x0102, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ - {0, 0, 0} -- cgit v1.2.3 From 83bc5fd29afff5898cadf87fb29eb9260eecc63e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 24 Jun 2012 19:57:24 +0200 Subject: drm/sis: fixup sis_mm ioctl structs Userspace uses long in quite a few places more than the kernel. Which gives me neat proof that I'm the only guy on this side of the galaxy who ever tried to run glxgears on a 64bit machine with sis graphics on linux. Note that the longs in drm_sis_mem_t aren't aligned properly, so this won't even work with 32bit userspace on 64bit kernel as-is. Hence the patch can't break that, either. Nope, I'm not nuts enough to write the 32bit ioctl compat layer for this and test it with some wine app. Even though hunting the ebay dungeons for a sis card actually supported by the mesa drivers casts some doubts on this ... 
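To make the alignment point concrete, here is a small standalone illustration (a hypothetical userspace check, not part of the patch; the struct is the updated drm_sis_mem_t from sis_drm.h):

    #include <stdio.h>
    #include <stddef.h>

    typedef struct {
            int context;
            unsigned long offset;
            unsigned long size;
            unsigned long free;
    } drm_sis_mem_t;

    int main(void)
    {
            /* i386:   offset=4,  size=8,  free=12, sizeof=16
             * x86-64: the first unsigned long wants 8-byte alignment, so 4
             *         padding bytes follow 'context': offset=8, size=16,
             *         free=24, sizeof=32
             * The two ABIs disagree on every field after 'context', which is
             * why 32-bit userspace on a 64-bit kernel would need a real
             * compat layer rather than a straight copy. */
            printf("offset=%zu size=%zu free=%zu total=%zu\n",
                   offsetof(drm_sis_mem_t, offset),
                   offsetof(drm_sis_mem_t, size),
                   offsetof(drm_sis_mem_t, free),
                   sizeof(drm_sis_mem_t));
            return 0;
    }
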
Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/sis/sis_mm.c | 6 +++--- include/drm/sis_drm.h | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c index 5acc396ef93c..2c231070d250 100644 --- a/drivers/gpu/drm/sis/sis_mm.c +++ b/drivers/gpu/drm/sis/sis_mm.c @@ -74,7 +74,7 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file dev_priv->vram_offset = fb->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size); + DRM_DEBUG("offset = %lu, size = %lu\n", fb->offset, fb->size); return 0; } @@ -161,7 +161,7 @@ fail_alloc: mem->size = 0; mem->free = 0; - DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size, + DRM_DEBUG("alloc %d, size = %ld, offset = %ld\n", pool, mem->size, mem->offset); return retval; @@ -215,7 +215,7 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data, dev_priv->agp_offset = agp->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size); + DRM_DEBUG("offset = %lu, size = %lu\n", agp->offset, agp->size); return 0; } diff --git a/include/drm/sis_drm.h b/include/drm/sis_drm.h index 035b804dda6d..df3763222d73 100644 --- a/include/drm/sis_drm.h +++ b/include/drm/sis_drm.h @@ -51,17 +51,17 @@ typedef struct { int context; - unsigned int offset; - unsigned int size; + unsigned long offset; + unsigned long size; unsigned long free; } drm_sis_mem_t; typedef struct { - unsigned int offset, size; + unsigned long offset, size; } drm_sis_agp_t; typedef struct { - unsigned int offset, size; + unsigned long offset, size; } drm_sis_fb_t; struct sis_file_private { -- cgit v1.2.3 From 4f91dd6f27f015f09828e42123bdabce0c8441e6 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 18 Jul 2012 10:10:09 -0700 Subject: drm/i915/context/: s/CTX/CXT *sigh* the docs had it spelled wrong, corrected it, and then proceeded to re-do the original error. The original code preserved this history, and this patch attempts to keep in sync with the current docs. 
Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_context.c | 4 ++-- drivers/gpu/drm/i915/i915_reg.h | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 9ae3f2cf414e..18eee8d2522d 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -112,8 +112,8 @@ static int get_context_size(struct drm_device *dev) ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; break; case 7: - reg = I915_READ(GEN7_CTX_SIZE); - ret = GEN7_CTX_TOTAL_SIZE(reg) * 64; + reg = I915_READ(GEN7_CXT_SIZE); + ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; break; default: BUG(); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1218069c7f66..9019194068e8 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1482,15 +1482,15 @@ GEN6_CXT_RENDER_SIZE(cxt_reg) + \ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ GEN6_CXT_PIPELINE_SIZE(cxt_reg)) -#define GEN7_CTX_SIZE 0x21a8 -#define GEN7_CTX_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f) -#define GEN7_CTX_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) -#define GEN7_CTX_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) -#define GEN7_CTX_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) -#define GEN7_CTX_TOTAL_SIZE(ctx_reg) (GEN7_CTX_RENDER_SIZE(ctx_reg) + \ - GEN7_CTX_EXTENDED_SIZE(ctx_reg) + \ - GEN7_CTX_GT1_SIZE(ctx_reg) + \ - GEN7_CTX_VFSTATE_SIZE(ctx_reg)) +#define GEN7_CXT_SIZE 0x21a8 +#define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f) +#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) +#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) +#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) +#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_RENDER_SIZE(ctx_reg) + \ + GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ + GEN7_CXT_GT1_SIZE(ctx_reg) + \ + GEN7_CXT_VFSTATE_SIZE(ctx_reg)) /* * Overlay regs -- cgit v1.2.3 From 6a4ea1248cc10354f36b7917eb412d01e979d5ad Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 18 Jul 2012 10:10:10 -0700 Subject: drm/i915/context: Add missing IVB context sizes There were some fields missed. Daniel pointed this out in review, and I know I fixed it, but something happened somehow and some time. 
Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9019194068e8..ac5688e8b01d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1483,11 +1483,15 @@ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ GEN6_CXT_PIPELINE_SIZE(cxt_reg)) #define GEN7_CXT_SIZE 0x21a8 +#define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f) +#define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7) #define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f) #define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) #define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) #define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) -#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_RENDER_SIZE(ctx_reg) + \ +#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \ + GEN7_CXT_RING_SIZE(ctx_reg) + \ + GEN7_CXT_RENDER_SIZE(ctx_reg) + \ GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ GEN7_CXT_GT1_SIZE(ctx_reg) + \ GEN7_CXT_VFSTATE_SIZE(ctx_reg)) -- cgit v1.2.3 From ff9282613f6796db3fe85dc6cbb995223078f581 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 5 Jul 2012 15:02:17 +0100 Subject: drm/i915: Only request PM interrupts for the events we handled There is little point waking up every 10ms to service an interrupt which we then promptly ignore. So only program the the PMIER to enable interrupts for those events which we do handle, not all of them! Signed-off-by: Chris Wilson Cc: Jesse Barnes Cc: Eugeni Dodonov Cc: Ben Widawsky Reviewed-by: Ben Widawsky Reviewed-by: Jesse Barnes Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_pm.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0bb69fd255a9..a1495cd2a8e7 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2491,14 +2491,7 @@ static void gen6_enable_rps(struct drm_device *dev) gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); /* requires MSI enabled */ - I915_WRITE(GEN6_PMIER, - GEN6_PM_MBOX_EVENT | - GEN6_PM_THERMAL_EVENT | - GEN6_PM_RP_DOWN_TIMEOUT | - GEN6_PM_RP_UP_THRESHOLD | - GEN6_PM_RP_DOWN_THRESHOLD | - GEN6_PM_RP_UP_EI_EXPIRED | - GEN6_PM_RP_DOWN_EI_EXPIRED); + I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); spin_lock_irq(&dev_priv->rps_lock); WARN_ON(dev_priv->pm_iir != 0); I915_WRITE(GEN6_PMIMR, 0); -- cgit v1.2.3 From 0232e927f8b0b37f5c82648f9bc45e55fb245068 Mon Sep 17 00:00:00 2001 From: Eugeni Dodonov Date: Fri, 6 Jul 2012 15:42:36 -0300 Subject: drm/i915: initialize power wells in modeset_init_hw This initializes power wells within the modeset_init_hw routine. Testing has shown that this works for both driver load time and for suspend-resume code paths. 
Signed-off-by: Eugeni Dodonov Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 5 +++++ drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 5 ----- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0972f49e15d8..80fe02d2d733 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7187,6 +7187,11 @@ static void ivb_pch_pwm_override(struct drm_device *dev) void intel_modeset_init_hw(struct drm_device *dev) { + /* We attempt to init the necessary power wells early in the initialization + * time, so the subsystems that expect power to be enabled can work. + */ + intel_init_power_wells(dev); + intel_prepare_ddi(dev); intel_init_clock_gating(dev); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b7859e7110a7..6f3bf22d32f5 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -496,6 +496,7 @@ extern void intel_update_fbc(struct drm_device *dev); extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); extern void intel_gpu_ips_teardown(void); +extern void intel_init_power_wells(struct drm_device *dev); extern void intel_enable_gt_powersave(struct drm_device *dev); extern void intel_disable_gt_powersave(struct drm_device *dev); extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a1495cd2a8e7..94aabcaa3a67 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3932,11 +3932,6 @@ void intel_init_pm(struct drm_device *dev) else dev_priv->display.get_fifo_size = i830_get_fifo_size; } - - /* We attempt to init the necessary power wells early in the initialization - * time, so the subsystems that expect power to be enabled can work. - */ - intel_init_power_wells(dev); } static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From 67b1b57182972f3c89c1ff9a58951582cb6bcca7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 5 Jul 2012 23:49:40 +0100 Subject: drm/i915: Disable the BLT on pre-production SNB hardware It never quite worked despite the numerous workarounds, yet I still see people trying to use this hardware and filing bug reports. As we no longer even try to implement the workarounds, since 6a233c78878 (drm/i915/ringbuffer: kill snb blt workaround), simply disable the ring. v2: Add a message to inform the user about the limited capabilities of their pre-production hardware. 
Signed-off-by: Chris Wilson Reviewed-by: Eric Anholt Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0fdb3d29cbbb..89d62f635ac8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3726,6 +3726,22 @@ void i915_gem_init_ppgtt(struct drm_device *dev) } } +static bool +intel_enable_blt(struct drm_device *dev) +{ + if (!HAS_BLT(dev)) + return false; + + /* The blitter was dysfunctional on early prototypes */ + if (IS_GEN6(dev) && dev->pdev->revision < 8) { + DRM_INFO("BLT not supported on this pre-production hardware;" + " graphics performance will be degraded.\n"); + return false; + } + + return true; +} + int i915_gem_init_hw(struct drm_device *dev) { @@ -3749,7 +3765,7 @@ i915_gem_init_hw(struct drm_device *dev) goto cleanup_render_ring; } - if (HAS_BLT(dev)) { + if (intel_enable_blt(dev)) { ret = intel_init_blt_ring_buffer(dev); if (ret) goto cleanup_bsd_ring; -- cgit v1.2.3 From 12f55818bac7b89c00e43504d12a45c47e49d282 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 5 Jul 2012 17:14:01 +0100 Subject: drm/i915: Add comments to explain the BSD tail write workaround Having had to dive into the bspec to understand what each stage of the workaround meant, and how the ring broadcasting IDLE corresponds with the GT powering down the ring (i.e. rc6), add comments to aid the next reader. And since the register "is used to control all aspects of PSMI and power saving functions" that makes it quite interesting to inspect with regards to RC6 hangs, so add it to the error-state. v2: Rediscover the piece of magic, set the RNCID to 0 before waiting for the ring to wake up.
Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_debugfs.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_irq.c | 1 + drivers/gpu/drm/i915/i915_reg.h | 8 ++++---- drivers/gpu/drm/i915/intel_ringbuffer.c | 27 +++++++++++++++++++-------- 5 files changed, 26 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 2909b123baf5..359f6e8b9b00 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -676,6 +676,7 @@ static void i915_ring_error_state(struct seq_file *m, seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); if (INTEL_INFO(dev)->gen >= 6) { + seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); seq_printf(m, " SYNC_0: 0x%08x\n", error->semaphore_mboxes[ring][0]); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 476c64c4844c..627fe35781b4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -190,6 +190,7 @@ struct drm_i915_error_state { u32 instdone[I915_NUM_RINGS]; u32 acthd[I915_NUM_RINGS]; u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; + u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */ /* our own tracking of ring head and tail */ u32 cpu_ring_head[I915_NUM_RINGS]; u32 cpu_ring_tail[I915_NUM_RINGS]; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 23f2ea0f0651..566f61b9e47c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1067,6 +1067,7 @@ static void i915_record_ring_state(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen >= 6) { + error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); error->semaphore_mboxes[ring->id][0] = I915_READ(RING_SYNC_0(ring->mmio_base)); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ac5688e8b01d..7aa6e97c2c72 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -690,10 +690,10 @@ #define GEN6_BLITTER_FBC_NOTIFY (1<<3) #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 -#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16) -#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0) -#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0 -#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) +#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) +#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) +#define GEN6_BSD_SLEEP_INDICATOR (1 << 3) +#define GEN6_BSD_GO_INDICATOR (1 << 4) #define GEN6_BSD_HWSTAM 0x12098 #define GEN6_BSD_IMR 0x120a8 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index d42d821c64d6..ddc48590ea60 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1270,20 +1270,31 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, drm_i915_private_t *dev_priv = ring->dev->dev_private; /* Every tail move must follow the sequence below */ + + /* Disable notification that the ring is IDLE. The GT + * will then assume that it is busy and bring it out of rc6. 
+ */ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, - GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | - GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE); - I915_WRITE(GEN6_BSD_RNCID, 0x0); + _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + + /* Clear the context id. Here be magic! */ + I915_WRITE64(GEN6_BSD_RNCID, 0x0); + /* Wait for the ring not to be idle, i.e. for it to wake up. */ if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & - GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, - 50)) - DRM_ERROR("timed out waiting for IDLE Indicator\n"); + GEN6_BSD_SLEEP_INDICATOR) == 0, + 50)) + DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); + /* Now that the ring is fully powered up, update the tail */ I915_WRITE_TAIL(ring, value); + POSTING_READ(RING_TAIL(ring->mmio_base)); + + /* Let the ring send IDLE messages to the GT again, + * and so let it sleep to conserve power when idle. + */ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, - GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | - GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); + _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); } static int gen6_ring_flush(struct intel_ring_buffer *ring, -- cgit v1.2.3 From 4b4147c38f89dea536afc50a0a2d2ed005b49288 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Jul 2012 00:31:06 +0200 Subject: drm/i915: fix up PCH backlight #define mixup I so totally suck. This can cause a black screen if (for whatever reason) the bios hasn't set this bit itself. This regression has been introduced in commit 7cf4160148136deb31ee5f2802857dd935a38529 Author: Daniel Vetter Date: Tue Jun 5 10:07:09 2012 +0200 drm/i915: clear up backlight #define confusion on gen4+ Tested-by: Kenneth Graunke Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7aa6e97c2c72..acc99b21e0b6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1900,7 +1900,7 @@ /* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ #define BLC_PWM_PCH_CTL1 0xc8250 -#define BLM_PCH_PWM_ENABLE (1 << 30) +#define BLM_PCH_PWM_ENABLE (1 << 31) #define BLM_PCH_OVERRIDE_ENABLE (1 << 30) #define BLM_PCH_POLARITY (1 << 29) #define BLC_PWM_PCH_CTL2 0xc8254 -- cgit v1.2.3 From a2bd1f541f1990dda7dd62f190342c9a964ceb4f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Jul 2012 12:31:52 +0200 Subject: drm/i915: check whether we actually received an edid in detect_ddc Somehow detect_ddc manages to fall through all checks when we think that something responds on the ddc i2c address, but the edid read failed. Fix this up by explicitly checking for this case. This fixes a regression on newer chips because since commit aaa377302b2994fcc2c66741b47da33feb489dca Author: Daniel Vetter Date: Sat Jun 16 15:30:32 2012 +0200 drm/i915/crt: Do not rely upon the HPD presence pin we use ddc detection also on hotplug capable platforms. And one of these reads all 0s for any i2c transaction if nothing is connected to the vga port. 
v2: Implement Chris Wilson's review: - simplify logic, default to "nothing detected" - kill stale comment - BUG_ON(!crt->type != ANALOG) Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=51900 Tested-by: Yang Guang Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_crt.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 48e3b76e0ab2..7ed4a41c3965 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -330,39 +330,34 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) { struct intel_crt *crt = intel_attached_crt(connector); struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; + struct edid *edid; + struct i2c_adapter *i2c; + + BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); - /* CRT should always be at 0, but check anyway */ - if (crt->base.type != INTEL_OUTPUT_ANALOG) - return false; + i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); + edid = drm_get_edid(connector, i2c); - if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { - struct edid *edid; - bool is_digital = false; - struct i2c_adapter *i2c; + if (edid) { + bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; - i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); - edid = drm_get_edid(connector, i2c); /* * This may be a DVI-I connector with a shared DDC * link between analog and digital outputs, so we * have to check the EDID input spec of the attached device. - * - * On the other hand, what should we do if it is a broken EDID? */ - if (edid != NULL) { - is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; - connector->display_info.raw_edid = NULL; - kfree(edid); - } - if (!is_digital) { DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); return true; - } else { - DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } + + DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); + } else { + DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); } + kfree(edid); + return false; } -- cgit v1.2.3 From 4d678e1670ad3c7240d559c98e481b43342a2974 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Jul 2012 12:31:53 +0200 Subject: drm/i915: kill intel_ddc_probe We have way too much lying hardware to rely on a simple "does someone answer on the ddc i2c address?" check. And now it's unused, so just kill it. 
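The difference between the two approaches can be sketched as follows (hypothetical helper for illustration only, not part of the patch): a bare probe merely proves that something ACKed i2c address 0x50, whereas drm_get_edid() also reads and validates the EDID block, so a bus that happily answers with all zeros is rejected.

static bool monitor_likely_present(struct drm_connector *connector,
				   struct i2c_adapter *ddc)
{
	/* drm_get_edid() does the full block read plus checksum validation,
	 * unlike the ACK-level probe removed below. */
	struct edid *edid = drm_get_edid(connector, ddc);
	bool present = edid != NULL;

	kfree(edid);	/* kfree(NULL) is a no-op */
	return present;
}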
Reviewed-by: Chris Wilson Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_drv.h | 1 - drivers/gpu/drm/i915/intel_modes.c | 28 ---------------------------- 2 files changed, 29 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 6f3bf22d32f5..84353559441c 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -342,7 +342,6 @@ struct intel_fbc_work { }; int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); -extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); extern void intel_attach_force_audio_property(struct drm_connector *connector); extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index d67ec3a51e42..45848b9b670b 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -32,34 +32,6 @@ #include "intel_drv.h" #include "i915_drv.h" -/** - * intel_ddc_probe - * - */ -bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus) -{ - struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private; - u8 out_buf[] = { 0x0, 0x0}; - u8 buf[2]; - struct i2c_msg msgs[] = { - { - .addr = DDC_ADDR, - .flags = 0, - .len = 1, - .buf = out_buf, - }, - { - .addr = DDC_ADDR, - .flags = I2C_M_RD, - .len = 1, - .buf = buf, - } - }; - - return i2c_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus), - msgs, 2) == 2; -} - /** * intel_ddc_get_modes - get modelist from monitor * @connector: DRM connector device to use -- cgit v1.2.3 From 0d71068835e2610576d369d6d4cbf90e0f802a71 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Fri, 29 Jun 2012 16:03:34 -0300 Subject: drm/i915: try to train DP even harder While debugging Haswell link train failures I observed that we never try the maximum voltage configuration more than once consecutively. We start the training, the monitor keeps telling us to increase the voltage, then when we reach the maximum we just go back to the start (because of the "memset" above "voltage_tries = 0"). When we reach this point, we keep alternating between the maximum and the minimum voltages until we give up. The DP spec suggests that we should try the same voltage 5 times before giving up. This patch makes us try the maximum voltage at least 5 times before going back to the minimum voltages. This patch does not fix any particular bug I'm aware of. Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 2bc1505132c3..0a56b9ab0f58 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1771,7 +1771,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) for (i = 0; i < intel_dp->lane_count; i++) if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; - if (i == intel_dp->lane_count) { + if (i == intel_dp->lane_count && voltage_tries == 5) { ++loop_tries; if (loop_tries == 5) { DRM_DEBUG_KMS("too many full retries, give up\n"); -- cgit v1.2.3 From 796265235b47cb40ecfa906cb2c3206bd0d07431 Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Fri, 13 Jul 2012 15:50:33 +0300 Subject: drm/i915: mask tiled bit when updating ILK sprites Or going from tiled to untiled may break. 
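The one-line fix below is easier to see in context; roughly, the update path looks like the sketch here (bit names taken from the patch, the tiling check is an assumption modelled on how the driver derives the other bits):

	/* Clear every conditionally-set bit before re-deriving it from the new
	 * framebuffer; without clearing DVS_TILED, a previously tiled setup
	 * would leak into an untiled one. */
	dvscntr &= ~DVS_PIXFORMAT_MASK;
	dvscntr &= ~DVS_RGB_ORDER_XBGR;
	dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
	dvscntr &= ~DVS_TILED;
	if (obj->tiling_mode != I915_TILING_NONE)	/* assumed check */
		dvscntr |= DVS_TILED;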
Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_sprite.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 1a1483b924d0..cc8df4de2d92 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -234,6 +234,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, dvscntr &= ~DVS_PIXFORMAT_MASK; dvscntr &= ~DVS_RGB_ORDER_XBGR; dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK; + dvscntr &= ~DVS_TILED; switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: -- cgit v1.2.3 From 2e6c21ed63af34059009c2ddfa44988d850fca7f Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Thu, 12 Jul 2012 23:16:12 -0700 Subject: drm/i915: missing error case in init status page Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_ringbuffer.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index ddc48590ea60..bf0195a96d53 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -972,6 +972,7 @@ static int init_status_page(struct intel_ring_buffer *ring) ring->status_page.gfx_addr = obj->gtt_offset; ring->status_page.page_addr = kmap(obj->pages[0]); if (ring->status_page.page_addr == NULL) { + ret = -ENOMEM; goto err_unpin; } ring->status_page.obj = obj; -- cgit v1.2.3 From 09cf7c9a1299c386525d31305fd50591acf49a9a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 13 Jul 2012 14:14:08 +0100 Subject: drm/i915: Insert a flush between batches if the breadcrumb was dropped If we drop the breadcrumb request after a batch due to a signal for example we aim to fix it up at the next opportunity. In this case we emit a second batchbuffer with no waits upon the first and so no opportunity to insert the missing request, so we need to emit the missing flush for coherency. (Note that that invalidating the render cache is the same as flushing it, so there should have been no observable corruption.) Note that beside simply adding the missing flush, avoiding potential render corruption, this will also fix at least parts of the problem introduced by some funny interaction of these two commits: commit de2b998552c1534e87bfbc51ec5734b02bc89020 Author: Daniel Vetter Date: Wed Jul 4 22:52:50 2012 +0200 drm/i915: don't return a spurious -EIO from intel_ring_begin which allowed intel_ring_begin to return -ERESTARTSYS and commit cc889e0f6ce6a63c62db17d702ecfed86d58083f Author: Daniel Vetter Date: Wed Jun 13 20:45:19 2012 +0200 drm/i915: disable flushing_list/gpu_write_list which essentially disabled the flushing list. The issue happens when we submit a batch & emit it, but get interrupted (thanks to the first patch) while trying to emit the flush. On the next batch we still assume that the full gpu domain handling is in effect and hence compute the invalidate&flushing domains. But thanks to the 2nd patch we totally ignore these and only invalidate all gpu domains, presuming that any required flushes have been issued already. Which is wrong and eventually results in us updating the new write_domain values with the computed pending_write_domain values, which leaves an object with write_domain == 0 on the gpu_write_list. As soon as we try to unbind that object, things blow up. Fix this by emitting the missing flush according to the new ring->gpu_caches_dirty flag. 
Note that this does _not_ fix all the current cases where we end up with an object on the flushing_list that can't be flushed. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=52040 Signed-off-by: Chris Wilson [danvet: Add bug explanation to commit message.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 88e2e114189c..981e14f000e3 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -885,11 +885,16 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, return ret; } - /* Unconditionally invalidate gpu caches. */ - ret = i915_gem_flush_ring(ring, I915_GEM_GPU_DOMAINS, 0); + /* Unconditionally invalidate gpu caches and ensure that we do flush + * any residual writes from the previous batch. + */ + ret = i915_gem_flush_ring(ring, + I915_GEM_GPU_DOMAINS, + ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0); if (ret) return ret; + ring->gpu_caches_dirty = false; return 0; } -- cgit v1.2.3 From a4f32fc3a37e982fffce8ec583643990ff288419 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Sat, 14 Jul 2012 11:57:12 -0300 Subject: drm/i915: don't forget the PCH backlight registers When we enable/disable the CPU backlight registers we can't forget to enable/disable the PCH backlight registers. Since we're using the CPU registers we should also unset the override bit. Fixes a regression on the following commit: drm/i915: properly enable the blc controller on the right pipe The commit just deleted the code that sets the PCH registers, so it was relying on the values set by the BIOS. I told my BIOS to boot on the DVI monitor instead of the LVDS panel, so I noticed the bug. Signed-off-by: Paulo Zanoni Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_panel.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 58c7ee7238b8..10c7d39034e1 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -289,11 +289,17 @@ void intel_panel_disable_backlight(struct drm_device *dev) intel_panel_actually_set_backlight(dev, 0); if (INTEL_INFO(dev)->gen >= 4) { - uint32_t reg; + uint32_t reg, tmp; reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE); + + if (HAS_PCH_SPLIT(dev)) { + tmp = I915_READ(BLC_PWM_PCH_CTL1); + tmp &= ~BLM_PCH_PWM_ENABLE; + I915_WRITE(BLC_PWM_PCH_CTL1, tmp); + } } } @@ -333,6 +339,13 @@ void intel_panel_enable_backlight(struct drm_device *dev, I915_WRITE(reg, tmp); POSTING_READ(reg); I915_WRITE(reg, tmp | BLM_PWM_ENABLE); + + if (HAS_PCH_SPLIT(dev)) { + tmp = I915_READ(BLC_PWM_PCH_CTL1); + tmp |= BLM_PCH_PWM_ENABLE; + tmp &= ~BLM_PCH_OVERRIDE_ENABLE; + I915_WRITE(BLC_PWM_PCH_CTL1, tmp); + } } } -- cgit v1.2.3 From 79158103bf20c9ff55204ceb78a5773f422b03be Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 23 May 2012 11:13:58 +0100 Subject: drm/i915: Make the lock for pageflips interruptible As we take the struct_mutex lock to access the command-stream, there is a possibility that we may need to wait for a GPU hang and so should make the lock both interruptible and error-checking. 
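In outline, the flip path then follows the usual interruptible-locking pattern (sketch only; the cleanup label is the one added by the patch below):

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;	/* signal (-ERESTARTSYS) or wedged GPU (-EIO) */

	/* ... queue the page flip under struct_mutex ... */

	mutex_unlock(&dev->struct_mutex);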
References: https://bugs.freedesktop.org/show_bug.cgi?id=50069 Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 80fe02d2d733..c9b69e3b2c29 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6486,7 +6486,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; - mutex_lock(&dev->struct_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto cleanup; /* Reference the objects for the scheduled work. */ drm_gem_object_reference(&work->old_fb_obj->base); @@ -6521,6 +6523,7 @@ cleanup_pending: drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); +cleanup: spin_lock_irqsave(&dev->event_lock, flags); intel_crtc->unpin_work = NULL; spin_unlock_irqrestore(&dev->event_lock, flags); -- cgit v1.2.3 From b97ce28e9f6d65a800e5c2ee3a2a99c7795bef65 Mon Sep 17 00:00:00 2001 From: Ilija Hadzic Date: Tue, 24 Jul 2012 15:30:36 +1000 Subject: drm/ttm: remove stale declaration and field Patch 649bf3ca77343e3be1e0af8e21356fa569b1abd9 has completely removed ttm_backend structure. Remove lingering declaration and related (now stale) field in ttm_tt structure, CC: Jerome Glisse Signed-off-by: Ilija Hadzic Signed-off-by: Dave Airlie --- include/drm/ttm/ttm_bo_driver.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index a05f1b55714d..084e8989a6e1 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -39,8 +39,6 @@ #include "linux/fs.h" #include "linux/spinlock.h" -struct ttm_backend; - struct ttm_backend_func { /** * struct ttm_backend_func member bind @@ -119,7 +117,6 @@ struct ttm_tt { unsigned long num_pages; struct sg_table *sg; /* for SG objects via dma-buf */ struct ttm_bo_global *glob; - struct ttm_backend *be; struct file *swap_storage; enum ttm_caching_state caching_state; enum { -- cgit v1.2.3 From 949c4a34afacfe800fc442afac117aba15284962 Mon Sep 17 00:00:00 2001 From: Ilija Hadzic Date: Tue, 15 May 2012 16:40:10 -0400 Subject: drm: track dev_mapping in more robust and flexible way Setting dev_mapping (pointer to the address_space structure used for memory mappings) to the address_space of the first opener's inode and then failing if other openers come in through a different inode has a few restrictions that are eliminated by this patch. If we already have valid dev_mapping and we spot an opener with different i_node, we force its i_mapping pointer to the already established address_space structure (first opener's inode). This will make all mappings from drm device hang off the same address_space object. Some benefits (things that now work and didn't work before) of this patch are: * user space can mknod and use any number of device nodes and they will all work fine as long as the major device number is that of the drm module. * user space can even remove the first opener's device nodes and mknod the new one and the applications and windowing system will still work. 
* GPU drivers can safely assume that dev->dev_mapping is correct address_space and just blindly copy it into their (private) bdev.dev_mapping For reference, some discussion that lead to this patch can be found here: http://lists.freedesktop.org/archives/dri-devel/2012-April/022283.html Signed-off-by: Ilija Hadzic Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 15 +++++++++------ drivers/gpu/drm/nouveau/nouveau_gem.c | 3 +-- drivers/gpu/drm/radeon/radeon_object.c | 4 +--- drivers/gpu/drm/radeon/radeon_ttm.c | 4 +--- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 5 +---- 5 files changed, 13 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index d25a61739a7b..5062eec673f1 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -140,12 +140,12 @@ int drm_open(struct inode *inode, struct file *filp) } if (!retcode) { mutex_lock(&dev->struct_mutex); - if (minor->type == DRM_MINOR_LEGACY) { - if (dev->dev_mapping == NULL) - dev->dev_mapping = inode->i_mapping; - else if (dev->dev_mapping != inode->i_mapping) - retcode = -ENODEV; - } + if (dev->dev_mapping == NULL) + dev->dev_mapping = &inode->i_data; + /* ihold ensures nobody can remove inode with our i_data */ + ihold(container_of(dev->dev_mapping, struct inode, i_data)); + inode->i_mapping = dev->dev_mapping; + filp->f_mapping = dev->dev_mapping; mutex_unlock(&dev->struct_mutex); } @@ -509,6 +509,9 @@ int drm_release(struct inode *inode, struct file *filp) } } + BUG_ON(dev->dev_mapping == NULL); + iput(container_of(dev->dev_mapping, struct inode, i_data)); + /* drop the reference held my the file priv */ drm_master_put(&file_priv->master); file_priv->is_master = 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 30f542316944..fe3f5a821b84 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -207,8 +207,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, struct nouveau_bo *nvbo = NULL; int ret = 0; - if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) - dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; + dev_priv->ttm.bdev.dev_mapping = dev->dev_mapping; if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) { NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 6ecb2006e27e..1f77a5afc011 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -115,9 +115,7 @@ int radeon_bo_create(struct radeon_device *rdev, size = ALIGN(size, PAGE_SIZE); - if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { - rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; - } + rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; if (kernel) { type = ttm_bo_type_kernel; } else if (sg) { diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 0881131a0388..5b71c716d83f 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -734,9 +734,7 @@ int radeon_ttm_init(struct radeon_device *rdev) } DRM_INFO("radeon: %uM of GTT memory ready.\n", (unsigned)(rdev->mc.gtt_size / (1024 * 1024))); - if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { - rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; - } + rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; r = radeon_ttm_debugfs_init(rdev); if (r) { diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 5d5632f5265b..4d9edead01ac 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -769,10 +769,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) goto out_no_tfile; file_priv->driver_priv = vmw_fp; - - if (unlikely(dev_priv->bdev.dev_mapping == NULL)) - dev_priv->bdev.dev_mapping = - file_priv->filp->f_path.dentry->d_inode->i_mapping; + dev_priv->bdev.dev_mapping = dev->dev_mapping; return 0; -- cgit v1.2.3 From d1c7871ddb1f588b8eb35affd9ee1a3d5e11cd0c Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Thu, 12 Jul 2012 18:23:05 -0400 Subject: drm/radeon: fix bo creation retry path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The retry label was at the wrong place in the function, leading to a memory leak. Cc: Signed-off-by: Jerome Glisse Reviewed-by: Michel Dänzer Reviewed-by: Christian König Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_object.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 1f77a5afc011..1f1a4c803c1d 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -136,7 +136,6 @@ int radeon_bo_create(struct radeon_device *rdev, acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, sizeof(struct radeon_bo)); -retry: bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; @@ -150,6 +149,8 @@ retry: bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); INIT_LIST_HEAD(&bo->va); + +retry: radeon_ttm_placement_from_domain(bo, domain); /* Kernel allocation are uninterruptible */ down_read(&rdev->pm.mclk_lock); -- cgit v1.2.3 From 1e179d4e283bd197035960ef751b5ccac06cbf91 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 20 Jul 2012 14:17:00 +0300 Subject: drm/radeon: check for allocation failure in radeon_ring_backup() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Static checkers complain if we don't check for allocation failure. Also we can use the new kmalloc_array() function here as a cleanup. Signed-off-by: Dan Carpenter Reviewed-by: Christian König Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_ring.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index f2fc25de0b2f..ec79b3750430 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -594,7 +594,11 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring } /* and then save the content of the ring */ - *data = kmalloc(size * 4, GFP_KERNEL); + *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); + if (!*data) { + mutex_unlock(&rdev->ring_lock); + return 0; + } for (i = 0; i < size; ++i) { (*data)[i] = ring->ring[ptr++]; ptr &= ring->ptr_mask; -- cgit v1.2.3 From 8d1c702aa0b2c4b22b0742b72a1149d91690674b Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Tue, 17 Jul 2012 17:17:16 -0400 Subject: drm/radeon: fix non-relevant error message We want to print "link status query failed" only if it's an unexpected failure. If we query to see if we need link training it might be because there is nothing connected and thus the link status query has the right to fail in that case.
To avoid printing failure when it's expected, move the failure message to proper place. Cc: stable@vger.kernel.org Signed-off-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atombios_dp.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 0355536f61e4..7712cf5ab33b 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -22,6 +22,7 @@ * * Authors: Dave Airlie * Alex Deucher + * Jerome Glisse */ #include "drmP.h" #include "radeon_drm.h" @@ -654,7 +655,6 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector, ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, link_status, DP_LINK_STATUS_SIZE, 100); if (ret <= 0) { - DRM_ERROR("displayport link status failed\n"); return false; } @@ -833,8 +833,10 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info) else mdelay(dp_info->rd_interval * 4); - if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) + if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { + DRM_ERROR("displayport link status failed\n"); break; + } if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { clock_recovery = true; @@ -896,8 +898,10 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info) else mdelay(dp_info->rd_interval * 4); - if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) + if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { + DRM_ERROR("displayport link status failed\n"); break; + } if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { channel_eq = true; -- cgit v1.2.3 From 266dcba541a1ef7e5d82d9e67c67fde2910636e8 Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Thu, 19 Jul 2012 17:15:56 -0400 Subject: drm/radeon: fix hotplug of DP to DVI|HDMI passive adapters (v2) No need to retrain the link for passive adapters. v2: agd5f - no passive DP to VGA adapters, update comments - assign radeon_connector_atom_dig after we are sure we have a digital connector as analog connectors have different private data. - get new sink type before checking for retrain. No need to check if it's no longer a DP connection. Cc: Signed-off-by: Jerome Glisse Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_connectors.c | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 2914c5761cfc..3524f17e3f05 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -64,14 +64,27 @@ void radeon_connector_hotplug(struct drm_connector *connector) /* just deal with DP (not eDP) here. 
*/ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { - int saved_dpms = connector->dpms; - - /* Only turn off the display it it's physically disconnected */ - if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - else if (radeon_dp_needs_link_train(radeon_connector)) - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - connector->dpms = saved_dpms; + struct radeon_connector_atom_dig *dig_connector = + radeon_connector->con_priv; + + /* if existing sink type was not DP no need to retrain */ + if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) + return; + + /* first get sink type as it may be reset after (un)plug */ + dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); + /* don't do anything if sink is not display port, i.e., + * passive dp->(dvi|hdmi) adaptor + */ + if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { + int saved_dpms = connector->dpms; + /* Only turn off the display if it's physically disconnected */ + if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + else if (radeon_dp_needs_link_train(radeon_connector)) + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + connector->dpms = saved_dpms; + } } } -- cgit v1.2.3 From ca2ccde5e2f24a792caa4cca919fc5c6f65d1887 Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Thu, 19 Jul 2012 17:25:55 -0400 Subject: drm/radeon: on hotplug force link training to happen (v2) To have DP behave like VGA/DVI we need to retrain the link on hotplug. For this to happen we need to force link training to happen by setting connector dpms to off before turning it on again. v2: agd5f - drop the dp_get_link_status() change in atombios_dp.c for now. We still need the dpms OFF change. Signed-off-by: Jerome Glisse Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_connectors.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 3524f17e3f05..895e628b60f8 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -79,10 +79,16 @@ void radeon_connector_hotplug(struct drm_connector *connector) if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { int saved_dpms = connector->dpms; /* Only turn off the display if it's physically disconnected */ - if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) + if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - else if (radeon_dp_needs_link_train(radeon_connector)) + } else if (radeon_dp_needs_link_train(radeon_connector)) { + /* set it to OFF so that drm_helper_connector_dpms() + * won't return immediately since the current state + * is ON at this point. + */ + connector->dpms = DRM_MODE_DPMS_OFF; drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + } connector->dpms = saved_dpms; } } -- cgit v1.2.3 From fcedac670c3da0d17aaa5db1708694971e8024a9 Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Tue, 24 Jul 2012 17:06:11 -0400 Subject: drm/radeon: fix dpms on/off on trinity/aruba v2 The external encoder needs to be set up again before enabling the transmitter. This seems to be only needed on some trinity/aruba to fix dpms on. v2: Add comment, only setup again on dce6 ie aruba or newer.
Cc: Signed-off-by: Jerome Glisse Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atombios_encoders.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 7dfc62fae6a6..f9bc27fe269a 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1392,10 +1392,18 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) case DRM_MODE_DPMS_ON: /* some early dce3.2 boards have a bug in their transmitter control table */ if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) || - ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) + ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + if (ASIC_IS_DCE6(rdev)) { + /* It seems we need to call ATOM_ENCODER_CMD_SETUP again + * before reenabling encoder on DPMS ON, otherwise we never + * get picture + */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + } atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - else + } else { atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); + } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { atombios_set_edp_panel_power(connector, -- cgit v1.2.3 From d3373a241bbf034a1ef43e6417e3d0029c8a8145 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 15 Jul 2012 12:34:22 +0100 Subject: drm/i915: Flush the context object from the CPU caches upon switching The issue is that we leave stale data in the CPU caches; when we come to swap out the object, the CPU may short-circuit the reads from those cachelines and so corrupt the context object. Secondly, leaving the context object marked in the CPU write domain whilst on the GPU active list is a bad idea and will throw warnings later. Note: Thanks to calling set_to_gtt_domain with write = false and not setting any gpu write domain when putting a context object onto the active list (when we switch away from it) the set_to_gtt_domain call won't block. Signed-off-by: Chris Wilson Reviewed-by: Ben Widawsky [danvet: Added a note to the commit message and a comment in the code to explain the clever non-blocking trick.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_context.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 18eee8d2522d..65639ad72808 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -374,6 +374,17 @@ static int do_switch(struct drm_i915_gem_object *from_obj, if (ret) return ret; + /* Clear this page out of any CPU caches for coherent swap-in/out. Note + * that thanks to write = false in this call and us not setting any gpu + * write domains when putting a context object onto the active list + * (when switching away from it), this won't block. + * XXX: We need a real interface to do this instead of trickery.
*/ + ret = i915_gem_object_set_to_gtt_domain(to->obj, false); + if (ret) { + i915_gem_object_unpin(to->obj); + return ret; + } + if (!to->obj->has_global_gtt_mapping) i915_gem_gtt_bind_object(to->obj, to->obj->cache_level); -- cgit v1.2.3 From eeef9b3874d756405ab8f71b4012a2e7ce31458e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 16 Jul 2012 13:05:34 +0100 Subject: drm/i915: Add -EIO to the list of known errors for __wait_seqno This prevents a WARN introduced with commit de2b998552c1534e87bfbc51ec5734b02bc89020 Author: Daniel Vetter Date: Wed Jul 4 22:52:50 2012 +0200 drm/i915: don't return a spurious -EIO from intel_ring_begin Signed-off-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 89d62f635ac8..5c4657a54f97 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2003,6 +2003,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, } switch (end) { + case -EIO: case -EAGAIN: /* Wedged */ case -ERESTARTSYS: /* Signal */ return (int)end; -- cgit v1.2.3 From b259b3120607f642daabdaadc99430a306899ffe Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 15 Jul 2012 12:34:23 +0100 Subject: drm/i915: fix invalid reference handling of the default ctx obj Otherwise we end up trying to unpin a freed object and BUG. Signed-off-by: Chris Wilson Reviewed-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_context.c | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 65639ad72808..873119f2436e 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -419,8 +419,11 @@ static int do_switch(struct drm_i915_gem_object *from_obj, from_obj->dirty = 1; BUG_ON(from_obj->ring != to->ring); i915_gem_object_unpin(from_obj); + + drm_gem_object_unreference(&from_obj->base); } + drm_gem_object_reference(&to->obj->base); ring->last_context_obj = to->obj; to->is_initialized = true; @@ -470,20 +473,7 @@ int i915_switch_context(struct intel_ring_buffer *ring, if (from_obj == to->obj) return 0; - ret = do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring)); - if (ret) - return ret; - - /* Just to make the code a little cleaner we take the object reference - * after the switch was successful. It would be more intuitive to ref - * the 'to' object before the switch but we know the refcount must be >0 - * if context_get() succeeded, and we hold struct mutex. So it's safe to - * do this here/now - */ - drm_gem_object_reference(&to->obj->base); - if (from_obj != NULL) - drm_gem_object_unreference(&from_obj->base); - return ret; + return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring)); } int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, -- cgit v1.2.3 From be636387f81376ff26963987531c89a63faf8a33 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 17 Jul 2012 09:44:49 +0300 Subject: drm/i915: dereferencing an error pointer We need to check that "ctx" is a valid pointer before dereferencing it. 
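As a reminder of the idiom involved (a generic sketch with made-up names, not the driver code): functions returning a pointer often encode an errno in the pointer itself via ERR_PTR(), so the result must be tested with IS_ERR() before any dereference.

	struct foo *f = create_foo();	/* may return ERR_PTR(-ENOMEM) and the like */
	if (IS_ERR(f))
		return PTR_ERR(f);	/* recover the negative errno */

	use(f->bar);			/* only safe after the IS_ERR() check */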
Signed-off-by: Dan Carpenter Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_context.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 873119f2436e..da8b01fb1bf8 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -497,11 +497,13 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, ctx = create_hw_context(dev, file_priv); mutex_unlock(&dev->struct_mutex); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); args->ctx_id = ctx->id; DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); - return PTR_RET(ctx); + return 0; } int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, -- cgit v1.2.3 From 520c41cf2fa029d1e8b923ac2026f96664f17c4b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Jul 2012 16:27:52 +0200 Subject: drm/i915/lvds: ditch ->prepare special case LVDS is the first output where dpms on/off and prepare/commit don't perfectly match. Now the idea behind this special case seems to be that for simple resolution changes on the LVDS we don't need to stop the pipe, because (at least on newer chips) we can adjust the panel fitter on the fly. There are a few problems with the current code though: - We still stop and restart the pipe unconditionally, because the crtc helper code isn't flexible enough. - We show some ugly flickering, especially when changing crtcs (this the crtc helper would actually take into account, but we don't implement the encoder->get_crtc callback required to make this work properly). So it doesn't even work as advertised. I agree that it would be nice to do resolution changes on LVDS (and also eDP) without blacking the screen where the panel fitter allows us to do that. But imo we should implement this as a special case a few layers up in the mode set code, akin to how we already detect simple framebuffer changes (and only update the required registers with ->mode_set_base). Until this is all in place, make our lives easier and just rip it out. Also note that this seems to fix actual bugs with enabling the lvds output, see: http://lists.freedesktop.org/archives/intel-gfx/2012-July/018614.html Cc: Takashi Iwai Cc: Giacomo Comes Acked-by: Chris Wilson Tested-by: Takashi Iwai Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_lvds.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 49f09a8b05e9..e05c0d3e3440 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -409,13 +409,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) { struct intel_lvds *intel_lvds = to_intel_lvds(encoder); - /* - * Prior to Ironlake, we must disable the pipe if we want to adjust - * the panel fitter. However at all other times we can just reset - * the registers regardless.
- */ - if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty) - intel_lvds_disable(intel_lvds); + intel_lvds_disable(intel_lvds); } static void intel_lvds_commit(struct drm_encoder *encoder) -- cgit v1.2.3 From 35313cde4c582f3581a4b8397311f302680625f5 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 20 Jul 2012 10:30:45 +0200 Subject: drm/i915: constify mode in crtc_mode_fixup Laurent Pinchart missed this when sending in is giant constify patch: commit e811f5ae19043b2ac2c28e147a4274038e655598 Author: Laurent Pinchart Date: Tue Jul 17 17:56:50 2012 +0200 drm: Make the .mode_fixup() operations mode argument a const pointer Acked-by; Laurent Pinchart Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c9b69e3b2c29..e9e476eca89f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3573,7 +3573,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder) } static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; -- cgit v1.2.3 From 0da5cec1de1eb39563fb6ca2be48c0e56fed6309 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 23 Jul 2012 12:33:55 -0700 Subject: drm/i915: Set the context before setting up regs for the context. Fixes failures in transform feedback on gen7 because our SOL_RESET flag was setting the transform feedback offsets in the old context (occasionally happened to be ours) instead of the new context. Signed-off-by: Eric Anholt Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 981e14f000e3..5af631e788c8 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1228,6 +1228,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } } + ret = i915_switch_context(ring, file, ctx_id); + if (ret) + goto err; + if (ring == &dev_priv->ring[RCS] && mode != dev_priv->relative_constants_mode) { ret = intel_ring_begin(ring, 4); @@ -1249,10 +1253,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - ret = i915_switch_context(ring, file, ctx_id); - if (ret) - goto err; - trace_i915_gem_ring_dispatch(ring, seqno); exec_start = batch_obj->gtt_offset + args->batch_start_offset; -- cgit v1.2.3 From e8aeaee7b012f1cdb382765d17307445385aa87c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 21 Jul 2012 16:47:09 +0200 Subject: drm/i915: unbreak lastclose for failed driver init We now refuse to load on gen6+ if kms is not enabled: commit 26394d9251879231b85e6c8cf899fa43e75c68f1 Author: Daniel Vetter Date: Mon Mar 26 21:33:18 2012 +0200 drm/i915: refuse to load on gen6+ without kms Which results in the drm core calling our lastclose function to clean up the mess, but that one is neatly broken for such failure cases since kms has been introduced in commit 79e539453b34e35f39299a899d263b0a1f1670bd Author: Jesse Barnes Date: Fri Nov 7 14:24:08 2008 -0800 DRM: i915: add mode setting support Reported-and-tested-by: Paulo Zanoni Signed-Off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_dma.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git 
a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f64ef4b723fd..9cf7dfe022b9 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1781,7 +1781,13 @@ void i915_driver_lastclose(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; - if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { + /* On gen6+ we refuse to init without kms enabled, but then the drm core + * goes right around and calls lastclose. Check for this and don't clean + * up anything. */ + if (!dev_priv) + return; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_fb_restore_mode(dev); vga_switcheroo_process_delayed_switch(); return; -- cgit v1.2.3 From ce806a30470bcd846d148bf39d46de3ad7748228 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Mon, 4 Jun 2012 12:00:31 +0200 Subject: nouveau: Fix alignment requirements on src and dst addresses Linear copy works by adding the offset to the buffer address, which may end up not being 16-byte aligned. Some tests I've written for prime_pcopy show that the engine allows this correctly, so the restriction on lowest 4 bits of address can be lifted safely. The comments added were by envyas, I think because I used a newer version. Signed-off-by: Maarten Lankhorst Cc: stable@vger.kernel.org --- drivers/gpu/drm/nouveau/nva3_copy.fuc | 4 +- drivers/gpu/drm/nouveau/nva3_copy.fuc.h | 94 +++++++++++++++++++++++++++++++-- drivers/gpu/drm/nouveau/nvc0_copy.fuc.h | 87 ++++++++++++++++++++++++++++-- 3 files changed, 175 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc index abc36626fef0..219850d53286 100644 --- a/drivers/gpu/drm/nouveau/nva3_copy.fuc +++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc @@ -119,9 +119,9 @@ dispatch_dma: // mthd 0x030c-0x0340, various stuff .b16 0xc3 14 .b32 #ctx_src_address_high ~0x000000ff -.b32 #ctx_src_address_low ~0xfffffff0 +.b32 #ctx_src_address_low ~0xffffffff .b32 #ctx_dst_address_high ~0x000000ff -.b32 #ctx_dst_address_low ~0xfffffff0 +.b32 #ctx_dst_address_low ~0xffffffff .b32 #ctx_src_pitch ~0x0007ffff .b32 #ctx_dst_pitch ~0x0007ffff .b32 #ctx_xcnt ~0x0000ffff diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h index 1f33fbdc00be..37d6de3c9d61 100644 --- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h +++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h @@ -1,37 +1,72 @@ -uint32_t nva3_pcopy_data[] = { +u32 nva3_pcopy_data[] = { +/* 0x0000: ctx_object */ 0x00000000, +/* 0x0004: ctx_dma */ +/* 0x0004: ctx_dma_query */ 0x00000000, +/* 0x0008: ctx_dma_src */ 0x00000000, +/* 0x000c: ctx_dma_dst */ 0x00000000, +/* 0x0010: ctx_query_address_high */ 0x00000000, +/* 0x0014: ctx_query_address_low */ 0x00000000, +/* 0x0018: ctx_query_counter */ 0x00000000, +/* 0x001c: ctx_src_address_high */ 0x00000000, +/* 0x0020: ctx_src_address_low */ 0x00000000, +/* 0x0024: ctx_src_pitch */ 0x00000000, +/* 0x0028: ctx_src_tile_mode */ 0x00000000, +/* 0x002c: ctx_src_xsize */ 0x00000000, +/* 0x0030: ctx_src_ysize */ 0x00000000, +/* 0x0034: ctx_src_zsize */ 0x00000000, +/* 0x0038: ctx_src_zoff */ 0x00000000, +/* 0x003c: ctx_src_xoff */ 0x00000000, +/* 0x0040: ctx_src_yoff */ 0x00000000, +/* 0x0044: ctx_src_cpp */ 0x00000000, +/* 0x0048: ctx_dst_address_high */ 0x00000000, +/* 0x004c: ctx_dst_address_low */ 0x00000000, +/* 0x0050: ctx_dst_pitch */ 0x00000000, +/* 0x0054: ctx_dst_tile_mode */ 0x00000000, +/* 0x0058: ctx_dst_xsize */ 0x00000000, +/* 0x005c: ctx_dst_ysize */ 0x00000000, 
+/* 0x0060: ctx_dst_zsize */ 0x00000000, +/* 0x0064: ctx_dst_zoff */ 0x00000000, +/* 0x0068: ctx_dst_xoff */ 0x00000000, +/* 0x006c: ctx_dst_yoff */ 0x00000000, +/* 0x0070: ctx_dst_cpp */ 0x00000000, +/* 0x0074: ctx_format */ 0x00000000, +/* 0x0078: ctx_swz_const0 */ 0x00000000, +/* 0x007c: ctx_swz_const1 */ 0x00000000, +/* 0x0080: ctx_xcnt */ 0x00000000, +/* 0x0084: ctx_ycnt */ 0x00000000, 0x00000000, 0x00000000, @@ -63,6 +98,7 @@ uint32_t nva3_pcopy_data[] = { 0x00000000, 0x00000000, 0x00000000, +/* 0x0100: dispatch_table */ 0x00010000, 0x00000000, 0x00000000, @@ -73,6 +109,7 @@ uint32_t nva3_pcopy_data[] = { 0x00010162, 0x00000000, 0x00030060, +/* 0x0128: dispatch_dma */ 0x00010170, 0x00000000, 0x00010170, @@ -118,11 +155,11 @@ uint32_t nva3_pcopy_data[] = { 0x0000001c, 0xffffff00, 0x00000020, - 0x0000000f, + 0x00000000, 0x00000048, 0xffffff00, 0x0000004c, - 0x0000000f, + 0x00000000, 0x00000024, 0xfff80000, 0x00000050, @@ -146,7 +183,8 @@ uint32_t nva3_pcopy_data[] = { 0x00000800, }; -uint32_t nva3_pcopy_code[] = { +u32 nva3_pcopy_code[] = { +/* 0x0000: main */ 0x04fe04bd, 0x3517f000, 0xf10010fe, @@ -158,23 +196,31 @@ uint32_t nva3_pcopy_code[] = { 0x17f11031, 0x27f01200, 0x0012d003, +/* 0x002f: spin */ 0xf40031f4, 0x0ef40028, +/* 0x0035: ih */ 0x8001cffd, 0xf40812c4, 0x21f4060b, +/* 0x0041: ih_no_chsw */ 0x0412c472, 0xf4060bf4, +/* 0x004a: ih_no_cmd */ 0x11c4c321, 0x4001d00c, +/* 0x0052: swctx */ 0x47f101f8, 0x4bfe7700, 0x0007fe00, 0xf00204b9, 0x01f40643, 0x0604fa09, +/* 0x006b: swctx_load */ 0xfa060ef4, +/* 0x006e: swctx_done */ 0x03f80504, +/* 0x0072: chsw */ 0x27f100f8, 0x23cf1400, 0x1e3fc800, @@ -183,18 +229,22 @@ uint32_t nva3_pcopy_code[] = { 0x1e3af052, 0xf00023d0, 0x24d00147, +/* 0x0093: chsw_no_unload */ 0xcf00f880, 0x3dc84023, 0x220bf41e, 0xf40131f4, 0x57f05221, 0x0367f004, +/* 0x00a8: chsw_load_ctx_dma */ 0xa07856bc, 0xb6018068, 0x87d00884, 0x0162b600, +/* 0x00bb: chsw_finish_load */ 0xf0f018f4, 0x23d00237, +/* 0x00c3: dispatch */ 0xf100f880, 0xcf190037, 0x33cf4032, @@ -202,6 +252,7 @@ uint32_t nva3_pcopy_code[] = { 0x1024b607, 0x010057f1, 0x74bd64bd, +/* 0x00dc: dispatch_loop */ 0x58005658, 0x50b60157, 0x0446b804, @@ -211,6 +262,7 @@ uint32_t nva3_pcopy_code[] = { 0xb60276bb, 0x57bb0374, 0xdf0ef400, +/* 0x0100: dispatch_valid_mthd */ 0xb60246bb, 0x45bb0344, 0x01459800, @@ -220,31 +272,41 @@ uint32_t nva3_pcopy_code[] = { 0xb0014658, 0x1bf40064, 0x00538009, +/* 0x0127: dispatch_cmd */ 0xf4300ef4, 0x55f90132, 0xf40c01f4, +/* 0x0132: dispatch_invalid_bitfield */ 0x25f0250e, +/* 0x0135: dispatch_illegal_mthd */ 0x0125f002, +/* 0x0138: dispatch_error */ 0x100047f1, 0xd00042d0, 0x27f04043, 0x0002d040, +/* 0x0148: hostirq_wait */ 0xf08002cf, 0x24b04024, 0xf71bf400, +/* 0x0154: dispatch_done */ 0x1d0027f1, 0xd00137f0, 0x00f80023, +/* 0x0160: cmd_nop */ +/* 0x0162: cmd_pm_trigger */ 0x27f100f8, 0x34bd2200, 0xd00233f0, 0x00f80023, +/* 0x0170: cmd_dma */ 0x012842b7, 0xf00145b6, 0x43801e39, 0x0040b701, 0x0644b606, 0xf80043d0, +/* 0x0189: cmd_exec_set_format */ 0xf030f400, 0xb00001b0, 0x01b00101, @@ -256,20 +318,26 @@ uint32_t nva3_pcopy_code[] = { 0x70b63847, 0x0232f401, 0x94bd84bd, +/* 0x01b4: ncomp_loop */ 0xb60f4ac4, 0xb4bd0445, +/* 0x01bc: bpc_loop */ 0xf404a430, 0xa5ff0f18, 0x00cbbbc0, 0xf40231f4, +/* 0x01ce: cmp_c0 */ 0x1bf4220e, 0x10c7f00c, 0xf400cbbb, +/* 0x01da: cmp_c1 */ 0xa430160e, 0x0c18f406, 0xbb14c7f0, 0x0ef400cb, +/* 0x01e9: cmp_zero */ 0x80c7f107, +/* 0x01ed: bpc_next */ 0x01c83800, 0xb60180b6, 0xb5b801b0, @@ -280,6 +348,7 @@ uint32_t nva3_pcopy_code[] = { 0x98110680, 
0x68fd2008, 0x0502f400, +/* 0x0216: dst_xcnt */ 0x75fd64bd, 0x1c078000, 0xf10078fd, @@ -304,6 +373,7 @@ uint32_t nva3_pcopy_code[] = { 0x980056d0, 0x56d01f06, 0x1030f440, +/* 0x0276: cmd_exec_set_surface_tiled */ 0x579800f8, 0x6879c70a, 0xb66478c7, @@ -311,9 +381,11 @@ uint32_t nva3_pcopy_code[] = { 0x0e76b060, 0xf0091bf4, 0x0ef40477, +/* 0x0291: xtile64 */ 0x027cf00f, 0xfd1170b6, 0x77f00947, +/* 0x029d: xtileok */ 0x0f5a9806, 0xfd115b98, 0xb7f000ab, @@ -371,6 +443,7 @@ uint32_t nva3_pcopy_code[] = { 0x67d00600, 0x0060b700, 0x0068d004, +/* 0x0382: cmd_exec_set_surface_linear */ 0x6cf000f8, 0x0260b702, 0x0864b602, @@ -381,13 +454,16 @@ uint32_t nva3_pcopy_code[] = { 0xb70067d0, 0x98040060, 0x67d00957, +/* 0x03ab: cmd_exec_wait */ 0xf900f800, 0xf110f900, 0xb6080007, +/* 0x03b6: loop */ 0x01cf0604, 0x0114f000, 0xfcfa1bf4, 0xf800fc10, +/* 0x03c5: cmd_exec_query */ 0x0d34c800, 0xf5701bf4, 0xf103ab21, @@ -417,6 +493,7 @@ uint32_t nva3_pcopy_code[] = { 0x47f10153, 0x44b60800, 0x0045d006, +/* 0x0438: query_counter */ 0x03ab21f5, 0x080c47f1, 0x980644b6, @@ -439,11 +516,13 @@ uint32_t nva3_pcopy_code[] = { 0x47f10153, 0x44b60800, 0x0045d006, +/* 0x0492: cmd_exec */ 0x21f500f8, 0x3fc803ab, 0x0e0bf400, 0x018921f5, 0x020047f1, +/* 0x04a7: cmd_exec_no_format */ 0xf11e0ef4, 0xb6081067, 0x77f00664, @@ -451,19 +530,24 @@ uint32_t nva3_pcopy_code[] = { 0x981c0780, 0x67d02007, 0x4067d000, +/* 0x04c2: cmd_exec_init_src_surface */ 0x32f444bd, 0xc854bd02, 0x0bf4043f, 0x8221f50a, 0x0a0ef403, +/* 0x04d4: src_tiled */ 0x027621f5, +/* 0x04db: cmd_exec_init_dst_surface */ 0xf40749f0, 0x57f00231, 0x083fc82c, 0xf50a0bf4, 0xf4038221, +/* 0x04ee: dst_tiled */ 0x21f50a0e, 0x49f00276, +/* 0x04f5: cmd_exec_kick */ 0x0057f108, 0x0654b608, 0xd0210698, @@ -473,6 +557,8 @@ uint32_t nva3_pcopy_code[] = { 0xc80054d0, 0x0bf40c3f, 0xc521f507, +/* 0x0519: cmd_exec_done */ +/* 0x051b: cmd_wrcache_flush */ 0xf100f803, 0xbd220027, 0x0133f034, diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h index a8d17458ced1..cd879f31bb38 100644 --- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h +++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h @@ -1,34 +1,65 @@ -uint32_t nvc0_pcopy_data[] = { +u32 nvc0_pcopy_data[] = { +/* 0x0000: ctx_object */ 0x00000000, +/* 0x0004: ctx_query_address_high */ 0x00000000, +/* 0x0008: ctx_query_address_low */ 0x00000000, +/* 0x000c: ctx_query_counter */ 0x00000000, +/* 0x0010: ctx_src_address_high */ 0x00000000, +/* 0x0014: ctx_src_address_low */ 0x00000000, +/* 0x0018: ctx_src_pitch */ 0x00000000, +/* 0x001c: ctx_src_tile_mode */ 0x00000000, +/* 0x0020: ctx_src_xsize */ 0x00000000, +/* 0x0024: ctx_src_ysize */ 0x00000000, +/* 0x0028: ctx_src_zsize */ 0x00000000, +/* 0x002c: ctx_src_zoff */ 0x00000000, +/* 0x0030: ctx_src_xoff */ 0x00000000, +/* 0x0034: ctx_src_yoff */ 0x00000000, +/* 0x0038: ctx_src_cpp */ 0x00000000, +/* 0x003c: ctx_dst_address_high */ 0x00000000, +/* 0x0040: ctx_dst_address_low */ 0x00000000, +/* 0x0044: ctx_dst_pitch */ 0x00000000, +/* 0x0048: ctx_dst_tile_mode */ 0x00000000, +/* 0x004c: ctx_dst_xsize */ 0x00000000, +/* 0x0050: ctx_dst_ysize */ 0x00000000, +/* 0x0054: ctx_dst_zsize */ 0x00000000, +/* 0x0058: ctx_dst_zoff */ 0x00000000, +/* 0x005c: ctx_dst_xoff */ 0x00000000, +/* 0x0060: ctx_dst_yoff */ 0x00000000, +/* 0x0064: ctx_dst_cpp */ 0x00000000, +/* 0x0068: ctx_format */ 0x00000000, +/* 0x006c: ctx_swz_const0 */ 0x00000000, +/* 0x0070: ctx_swz_const1 */ 0x00000000, +/* 0x0074: ctx_xcnt */ 0x00000000, +/* 0x0078: ctx_ycnt */ 0x00000000, 0x00000000, 
0x00000000, @@ -63,6 +94,7 @@ uint32_t nvc0_pcopy_data[] = { 0x00000000, 0x00000000, 0x00000000, +/* 0x0100: dispatch_table */ 0x00010000, 0x00000000, 0x00000000, @@ -111,11 +143,11 @@ uint32_t nvc0_pcopy_data[] = { 0x00000010, 0xffffff00, 0x00000014, - 0x0000000f, + 0x00000000, 0x0000003c, 0xffffff00, 0x00000040, - 0x0000000f, + 0x00000000, 0x00000018, 0xfff80000, 0x00000044, @@ -139,7 +171,8 @@ uint32_t nvc0_pcopy_data[] = { 0x00000800, }; -uint32_t nvc0_pcopy_code[] = { +u32 nvc0_pcopy_code[] = { +/* 0x0000: main */ 0x04fe04bd, 0x3517f000, 0xf10010fe, @@ -151,15 +184,20 @@ uint32_t nvc0_pcopy_code[] = { 0x17f11031, 0x27f01200, 0x0012d003, +/* 0x002f: spin */ 0xf40031f4, 0x0ef40028, +/* 0x0035: ih */ 0x8001cffd, 0xf40812c4, 0x21f4060b, +/* 0x0041: ih_no_chsw */ 0x0412c4ca, 0xf5070bf4, +/* 0x004b: ih_no_cmd */ 0xc4010221, 0x01d00c11, +/* 0x0053: swctx */ 0xf101f840, 0xfe770047, 0x47f1004b, @@ -188,8 +226,11 @@ uint32_t nvc0_pcopy_code[] = { 0xf00204b9, 0x01f40643, 0x0604fa09, +/* 0x00c3: swctx_load */ 0xfa060ef4, +/* 0x00c6: swctx_done */ 0x03f80504, +/* 0x00ca: chsw */ 0x27f100f8, 0x23cf1400, 0x1e3fc800, @@ -198,18 +239,22 @@ uint32_t nvc0_pcopy_code[] = { 0x1e3af053, 0xf00023d0, 0x24d00147, +/* 0x00eb: chsw_no_unload */ 0xcf00f880, 0x3dc84023, 0x090bf41e, 0xf40131f4, +/* 0x00fa: chsw_finish_load */ 0x37f05321, 0x8023d002, +/* 0x0102: dispatch */ 0x37f100f8, 0x32cf1900, 0x0033cf40, 0x07ff24e4, 0xf11024b6, 0xbd010057, +/* 0x011b: dispatch_loop */ 0x5874bd64, 0x57580056, 0x0450b601, @@ -219,6 +264,7 @@ uint32_t nvc0_pcopy_code[] = { 0xbb0f08f4, 0x74b60276, 0x0057bb03, +/* 0x013f: dispatch_valid_mthd */ 0xbbdf0ef4, 0x44b60246, 0x0045bb03, @@ -229,24 +275,33 @@ uint32_t nvc0_pcopy_code[] = { 0x64b00146, 0x091bf400, 0xf4005380, +/* 0x0166: dispatch_cmd */ 0x32f4300e, 0xf455f901, 0x0ef40c01, +/* 0x0171: dispatch_invalid_bitfield */ 0x0225f025, +/* 0x0174: dispatch_illegal_mthd */ +/* 0x0177: dispatch_error */ 0xf10125f0, 0xd0100047, 0x43d00042, 0x4027f040, +/* 0x0187: hostirq_wait */ 0xcf0002d0, 0x24f08002, 0x0024b040, +/* 0x0193: dispatch_done */ 0xf1f71bf4, 0xf01d0027, 0x23d00137, +/* 0x019f: cmd_nop */ 0xf800f800, +/* 0x01a1: cmd_pm_trigger */ 0x0027f100, 0xf034bd22, 0x23d00233, +/* 0x01af: cmd_exec_set_format */ 0xf400f800, 0x01b0f030, 0x0101b000, @@ -258,20 +313,26 @@ uint32_t nvc0_pcopy_code[] = { 0x3847c701, 0xf40170b6, 0x84bd0232, +/* 0x01da: ncomp_loop */ 0x4ac494bd, 0x0445b60f, +/* 0x01e2: bpc_loop */ 0xa430b4bd, 0x0f18f404, 0xbbc0a5ff, 0x31f400cb, 0x220ef402, +/* 0x01f4: cmp_c0 */ 0xf00c1bf4, 0xcbbb10c7, 0x160ef400, +/* 0x0200: cmp_c1 */ 0xf406a430, 0xc7f00c18, 0x00cbbb14, +/* 0x020f: cmp_zero */ 0xf1070ef4, +/* 0x0213: bpc_next */ 0x380080c7, 0x80b601c8, 0x01b0b601, @@ -283,6 +344,7 @@ uint32_t nvc0_pcopy_code[] = { 0x1d08980e, 0xf40068fd, 0x64bd0502, +/* 0x023c: dst_xcnt */ 0x800075fd, 0x78fd1907, 0x1057f100, @@ -307,15 +369,18 @@ uint32_t nvc0_pcopy_code[] = { 0x1c069800, 0xf44056d0, 0x00f81030, +/* 0x029c: cmd_exec_set_surface_tiled */ 0xc7075798, 0x78c76879, 0x0380b664, 0xb06077c7, 0x1bf40e76, 0x0477f009, +/* 0x02b7: xtile64 */ 0xf00f0ef4, 0x70b6027c, 0x0947fd11, +/* 0x02c3: xtileok */ 0x980677f0, 0x5b980c5a, 0x00abfd0e, @@ -374,6 +439,7 @@ uint32_t nvc0_pcopy_code[] = { 0xb70067d0, 0xd0040060, 0x00f80068, +/* 0x03a8: cmd_exec_set_surface_linear */ 0xb7026cf0, 0xb6020260, 0x57980864, @@ -384,12 +450,15 @@ uint32_t nvc0_pcopy_code[] = { 0x0060b700, 0x06579804, 0xf80067d0, +/* 0x03d1: cmd_exec_wait */ 0xf900f900, 0x0007f110, 0x0604b608, +/* 0x03dc: loop */ 0xf00001cf, 0x1bf40114, 
0xfc10fcfa, +/* 0x03eb: cmd_exec_query */ 0xc800f800, 0x1bf40d34, 0xd121f570, @@ -419,6 +488,7 @@ uint32_t nvc0_pcopy_code[] = { 0x0153f026, 0x080047f1, 0xd00644b6, +/* 0x045e: query_counter */ 0x21f50045, 0x47f103d1, 0x44b6080c, @@ -442,11 +512,13 @@ uint32_t nvc0_pcopy_code[] = { 0x080047f1, 0xd00644b6, 0x00f80045, +/* 0x04b8: cmd_exec */ 0x03d121f5, 0xf4003fc8, 0x21f50e0b, 0x47f101af, 0x0ef40200, +/* 0x04cd: cmd_exec_no_format */ 0x1067f11e, 0x0664b608, 0x800177f0, @@ -454,18 +526,23 @@ uint32_t nvc0_pcopy_code[] = { 0x1d079819, 0xd00067d0, 0x44bd4067, +/* 0x04e8: cmd_exec_init_src_surface */ 0xbd0232f4, 0x043fc854, 0xf50a0bf4, 0xf403a821, +/* 0x04fa: src_tiled */ 0x21f50a0e, 0x49f0029c, +/* 0x0501: cmd_exec_init_dst_surface */ 0x0231f407, 0xc82c57f0, 0x0bf4083f, 0xa821f50a, 0x0a0ef403, +/* 0x0514: dst_tiled */ 0x029c21f5, +/* 0x051b: cmd_exec_kick */ 0xf10849f0, 0xb6080057, 0x06980654, @@ -475,7 +552,9 @@ uint32_t nvc0_pcopy_code[] = { 0x54d00546, 0x0c3fc800, 0xf5070bf4, +/* 0x053f: cmd_exec_done */ 0xf803eb21, +/* 0x0541: cmd_wrcache_flush */ 0x0027f100, 0xf034bd22, 0x23d00133, -- cgit v1.2.3 From 16fde6cd323a4f7654ac76dae12cb36208ed4c5d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 8 Jun 2012 10:31:46 +1000 Subject: drm/nouveau: dcb table quirk for fdo#50830 Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 2f11e16a81a9..a0a3fe3c016b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -6091,6 +6091,18 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) } } + /* fdo#50830: connector indices for VGA and DVI-I are backwards */ + if (nv_match_device(dev, 0x0421, 0x3842, 0xc793)) { + if (idx == 0 && *conn == 0x02000300) + *conn = 0x02011300; + else + if (idx == 1 && *conn == 0x04011310) + *conn = 0x04000310; + else + if (idx == 2 && *conn == 0x02011312) + *conn = 0x02000312; + } + return true; } -- cgit v1.2.3 From e0dd536a7abbce564eff9929e0a3cf77ba2d2e2e Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Mon, 11 Jun 2012 00:21:12 +0200 Subject: drm/nv84: decode PCRYPT errors Signed-off-by: Marcin Slusarz Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv84_crypt.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c index edece9c616eb..bbfcc73b6708 100644 --- a/drivers/gpu/drm/nouveau/nv84_crypt.c +++ b/drivers/gpu/drm/nouveau/nv84_crypt.c @@ -117,18 +117,30 @@ nv84_crypt_tlb_flush(struct drm_device *dev, int engine) nv50_vm_flush_engine(dev, 0x0a); } +static struct nouveau_bitfield nv84_crypt_intr[] = { + { 0x00000001, "INVALID_STATE" }, + { 0x00000002, "ILLEGAL_MTHD" }, + { 0x00000004, "ILLEGAL_CLASS" }, + { 0x00000080, "QUERY" }, + { 0x00000100, "FAULT" }, + {} +}; + static void nv84_crypt_isr(struct drm_device *dev) { u32 stat = nv_rd32(dev, 0x102130); u32 mthd = nv_rd32(dev, 0x102190); u32 data = nv_rd32(dev, 0x102194); - u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff; + u64 inst = (u64)(nv_rd32(dev, 0x102188) & 0x7fffffff) << 12; int show = nouveau_ratelimit(); + int chid = nv50_graph_isr_chid(dev, inst); if (show) { - NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n", - stat, mthd, data, inst); + NV_INFO(dev, "PCRYPT:"); + nouveau_bitfield_print(nv84_crypt_intr, stat); + printk(KERN_CONT " ch %d (0x%010llx) 
mthd 0x%04x data 0x%08x\n", + chid, inst, mthd, data); } nv_wr32(dev, 0x102130, stat); -- cgit v1.2.3 From 887cd78804fb4179211d221c023455c33f13206a Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Thu, 14 Jun 2012 23:58:08 +0200 Subject: drm/nv50: rename INVALID_QUERY_OR_TEXTURE error to INVALID_OPERATION Current name is misleading, because this error can be triggered by other conditions, like changing STRMOUT parameter without disabling STRMOUT first. Signed-off-by: Marcin Slusarz Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_graph.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index d9cc2f2638d6..437608d1dfe7 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -299,7 +299,7 @@ static struct nouveau_bitfield nv50_graph_trap_ccache[] = { /* There must be a *lot* of these. Will take some time to gather them up. */ struct nouveau_enum nv50_data_error_names[] = { - { 0x00000003, "INVALID_QUERY_OR_TEXTURE", NULL }, + { 0x00000003, "INVALID_OPERATION", NULL }, { 0x00000004, "INVALID_VALUE", NULL }, { 0x00000005, "INVALID_ENUM", NULL }, { 0x00000008, "INVALID_OBJECT", NULL }, -- cgit v1.2.3 From 5086f69eb960ff5984a2fdac9d238c76edfe57bc Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 25 Jun 2012 18:04:27 +1000 Subject: drm/nouveau/gem: fix object reference leak in a failure path Reported-by: Maarten Lankhorst Signed-off-by: Ben Skeggs Signed-off-by: Maarten Lankhorst --- drivers/gpu/drm/nouveau/nouveau_gem.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 30f542316944..34d0bc596213 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -342,6 +342,7 @@ retry: if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { NV_ERROR(dev, "multiple instances of buffer %d on " "validation list\n", b->handle); + drm_gem_object_unreference_unlocked(gem); validate_fini(op, NULL); return -EINVAL; } -- cgit v1.2.3 From ce22af03da2cd700332a384e240378cae1355773 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sat, 5 May 2012 22:19:20 +1000 Subject: drm/nouveau: move current gpuobj code out of nouveau_object.c I want this file for the new interfaces... 
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 2 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 +- drivers/gpu/drm/nouveau/nouveau_gpuobj.c | 868 +++++++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_object.c | 868 ------------------------------- 4 files changed, 870 insertions(+), 870 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nouveau_gpuobj.c delete mode 100644 drivers/gpu/drm/nouveau/nouveau_object.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index fe5267d06ab5..efa1aef35f3a 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -4,7 +4,7 @@ ccflags-y := -Iinclude/drm nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ - nouveau_object.o nouveau_irq.o nouveau_notifier.o \ + nouveau_gpuobj.o nouveau_irq.o nouveau_notifier.o \ nouveau_sgdma.o nouveau_dma.o nouveau_util.o \ nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 8613cb23808c..b0f8dd0373cd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -938,7 +938,7 @@ extern void nouveau_channel_ref(struct nouveau_channel *chan, struct nouveau_channel **pchan); extern int nouveau_channel_idle(struct nouveau_channel *chan); -/* nouveau_object.c */ +/* nouveau_gpuobj.c */ #define NVOBJ_ENGINE_ADD(d, e, p) do { \ struct drm_nouveau_private *dev_priv = (d)->dev_private; \ dev_priv->eng[NVOBJ_ENGINE_##e] = (p); \ diff --git a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c new file mode 100644 index 000000000000..b190cc01c820 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c @@ -0,0 +1,868 @@ +/* + * Copyright (C) 2006 Ben Skeggs. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +/* + * Authors: + * Ben Skeggs + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" +#include "nouveau_fifo.h" +#include "nouveau_ramht.h" +#include "nouveau_software.h" +#include "nouveau_vm.h" + +struct nouveau_gpuobj_method { + struct list_head head; + u32 mthd; + int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data); +}; + +struct nouveau_gpuobj_class { + struct list_head head; + struct list_head methods; + u32 id; + u32 engine; +}; + +int +nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj_class *oc; + + oc = kzalloc(sizeof(*oc), GFP_KERNEL); + if (!oc) + return -ENOMEM; + + INIT_LIST_HEAD(&oc->methods); + oc->id = class; + oc->engine = engine; + list_add(&oc->head, &dev_priv->classes); + return 0; +} + +int +nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd, + int (*exec)(struct nouveau_channel *, u32, u32, u32)) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj_method *om; + struct nouveau_gpuobj_class *oc; + + list_for_each_entry(oc, &dev_priv->classes, head) { + if (oc->id == class) + goto found; + } + + return -EINVAL; + +found: + om = kzalloc(sizeof(*om), GFP_KERNEL); + if (!om) + return -ENOMEM; + + om->mthd = mthd; + om->exec = exec; + list_add(&om->head, &oc->methods); + return 0; +} + +int +nouveau_gpuobj_mthd_call(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct nouveau_gpuobj_method *om; + struct nouveau_gpuobj_class *oc; + + list_for_each_entry(oc, &dev_priv->classes, head) { + if (oc->id != class) + continue; + + list_for_each_entry(om, &oc->methods, head) { + if (om->mthd == mthd) + return om->exec(chan, class, mthd, data); + } + } + + return -ENOENT; +} + +int +nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid, + u32 class, u32 mthd, u32 data) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); + struct nouveau_channel *chan = NULL; + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + if (chid >= 0 && chid < pfifo->channels) + chan = dev_priv->channels.ptr[chid]; + if (chan) + ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return ret; +} + +int +nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, + uint32_t size, int align, uint32_t flags, + struct nouveau_gpuobj **gpuobj_ret) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + struct nouveau_gpuobj *gpuobj; + struct drm_mm_node *ramin = NULL; + int ret, i; + + NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", + chan ? 
chan->id : -1, size, align, flags); + + gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); + if (!gpuobj) + return -ENOMEM; + NV_DEBUG(dev, "gpuobj %p\n", gpuobj); + gpuobj->dev = dev; + gpuobj->flags = flags; + kref_init(&gpuobj->refcount); + gpuobj->size = size; + + spin_lock(&dev_priv->ramin_lock); + list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + spin_unlock(&dev_priv->ramin_lock); + + if (!(flags & NVOBJ_FLAG_VM) && chan) { + ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); + if (ramin) + ramin = drm_mm_get_block(ramin, size, align); + if (!ramin) { + nouveau_gpuobj_ref(NULL, &gpuobj); + return -ENOMEM; + } + + gpuobj->pinst = chan->ramin->pinst; + if (gpuobj->pinst != ~0) + gpuobj->pinst += ramin->start; + + gpuobj->cinst = ramin->start; + gpuobj->vinst = ramin->start + chan->ramin->vinst; + gpuobj->node = ramin; + } else { + ret = instmem->get(gpuobj, chan, size, align); + if (ret) { + nouveau_gpuobj_ref(NULL, &gpuobj); + return ret; + } + + ret = -ENOSYS; + if (!(flags & NVOBJ_FLAG_DONT_MAP)) + ret = instmem->map(gpuobj); + if (ret) + gpuobj->pinst = ~0; + + gpuobj->cinst = NVOBJ_CINST_GLOBAL; + } + + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { + for (i = 0; i < gpuobj->size; i += 4) + nv_wo32(gpuobj, i, 0); + instmem->flush(dev); + } + + + *gpuobj_ret = gpuobj; + return 0; +} + +int +nouveau_gpuobj_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + NV_DEBUG(dev, "\n"); + + INIT_LIST_HEAD(&dev_priv->gpuobj_list); + INIT_LIST_HEAD(&dev_priv->classes); + spin_lock_init(&dev_priv->ramin_lock); + dev_priv->ramin_base = ~0; + + return 0; +} + +void +nouveau_gpuobj_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj_method *om, *tm; + struct nouveau_gpuobj_class *oc, *tc; + + NV_DEBUG(dev, "\n"); + + list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) { + list_for_each_entry_safe(om, tm, &oc->methods, head) { + list_del(&om->head); + kfree(om); + } + list_del(&oc->head); + kfree(oc); + } + + WARN_ON(!list_empty(&dev_priv->gpuobj_list)); +} + + +static void +nouveau_gpuobj_del(struct kref *ref) +{ + struct nouveau_gpuobj *gpuobj = + container_of(ref, struct nouveau_gpuobj, refcount); + struct drm_device *dev = gpuobj->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + int i; + + NV_DEBUG(dev, "gpuobj %p\n", gpuobj); + + if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { + for (i = 0; i < gpuobj->size; i += 4) + nv_wo32(gpuobj, i, 0); + instmem->flush(dev); + } + + if (gpuobj->dtor) + gpuobj->dtor(dev, gpuobj); + + if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) { + if (gpuobj->node) { + instmem->unmap(gpuobj); + instmem->put(gpuobj); + } + } else { + if (gpuobj->node) { + spin_lock(&dev_priv->ramin_lock); + drm_mm_put_block(gpuobj->node); + spin_unlock(&dev_priv->ramin_lock); + } + } + + spin_lock(&dev_priv->ramin_lock); + list_del(&gpuobj->list); + spin_unlock(&dev_priv->ramin_lock); + + kfree(gpuobj); +} + +void +nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr) +{ + if (ref) + kref_get(&ref->refcount); + + if (*ptr) + kref_put(&(*ptr)->refcount, nouveau_gpuobj_del); + + *ptr = ref; +} + +int +nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst, + u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = NULL; + int i; + + 
NV_DEBUG(dev, + "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n", + pinst, vinst, size, flags); + + gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); + if (!gpuobj) + return -ENOMEM; + NV_DEBUG(dev, "gpuobj %p\n", gpuobj); + gpuobj->dev = dev; + gpuobj->flags = flags; + kref_init(&gpuobj->refcount); + gpuobj->size = size; + gpuobj->pinst = pinst; + gpuobj->cinst = NVOBJ_CINST_GLOBAL; + gpuobj->vinst = vinst; + + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { + for (i = 0; i < gpuobj->size; i += 4) + nv_wo32(gpuobj, i, 0); + dev_priv->engine.instmem.flush(dev); + } + + spin_lock(&dev_priv->ramin_lock); + list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + spin_unlock(&dev_priv->ramin_lock); + *pgpuobj = gpuobj; + return 0; +} + +void +nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class, + u64 base, u64 size, int target, int access, + u32 type, u32 comp) +{ + struct drm_nouveau_private *dev_priv = obj->dev->dev_private; + struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; + u32 flags0; + + flags0 = (comp << 29) | (type << 22) | class; + flags0 |= 0x00100000; + + switch (access) { + case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break; + case NV_MEM_ACCESS_RW: + case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break; + default: + break; + } + + switch (target) { + case NV_MEM_TARGET_VRAM: + flags0 |= 0x00010000; + break; + case NV_MEM_TARGET_PCI: + flags0 |= 0x00020000; + break; + case NV_MEM_TARGET_PCI_NOSNOOP: + flags0 |= 0x00030000; + break; + case NV_MEM_TARGET_GART: + base += dev_priv->gart_info.aper_base; + default: + flags0 &= ~0x00100000; + break; + } + + /* convert to base + limit */ + size = (base + size) - 1; + + nv_wo32(obj, offset + 0x00, flags0); + nv_wo32(obj, offset + 0x04, lower_32_bits(size)); + nv_wo32(obj, offset + 0x08, lower_32_bits(base)); + nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 | + upper_32_bits(base)); + nv_wo32(obj, offset + 0x10, 0x00000000); + nv_wo32(obj, offset + 0x14, 0x00000000); + + pinstmem->flush(obj->dev); +} + +int +nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size, + int target, int access, u32 type, u32 comp, + struct nouveau_gpuobj **pobj) +{ + struct drm_device *dev = chan->dev; + int ret; + + ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj); + if (ret) + return ret; + + nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target, + access, type, comp); + return 0; +} + +int +nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, + u64 size, int access, int target, + struct nouveau_gpuobj **pobj) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct drm_device *dev = chan->dev; + struct nouveau_gpuobj *obj; + u32 flags0, flags2; + int ret; + + if (dev_priv->card_type >= NV_50) { + u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0; + u32 type = (target == NV_MEM_TARGET_VM) ? 
NV_MEM_TYPE_VM : 0; + + return nv50_gpuobj_dma_new(chan, class, base, size, + target, access, type, comp, pobj); + } + + if (target == NV_MEM_TARGET_GART) { + struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma; + + if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) { + if (base == 0) { + nouveau_gpuobj_ref(gart, pobj); + return 0; + } + + base = nouveau_sgdma_get_physical(dev, base); + target = NV_MEM_TARGET_PCI; + } else { + base += dev_priv->gart_info.aper_base; + if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) + target = NV_MEM_TARGET_PCI_NOSNOOP; + else + target = NV_MEM_TARGET_PCI; + } + } + + flags0 = class; + flags0 |= 0x00003000; /* PT present, PT linear */ + flags2 = 0; + + switch (target) { + case NV_MEM_TARGET_PCI: + flags0 |= 0x00020000; + break; + case NV_MEM_TARGET_PCI_NOSNOOP: + flags0 |= 0x00030000; + break; + default: + break; + } + + switch (access) { + case NV_MEM_ACCESS_RO: + flags0 |= 0x00004000; + break; + case NV_MEM_ACCESS_WO: + flags0 |= 0x00008000; + default: + flags2 |= 0x00000002; + break; + } + + flags0 |= (base & 0x00000fff) << 20; + flags2 |= (base & 0xfffff000); + + ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj); + if (ret) + return ret; + + nv_wo32(obj, 0x00, flags0); + nv_wo32(obj, 0x04, size - 1); + nv_wo32(obj, 0x08, flags2); + nv_wo32(obj, 0x0c, flags2); + + obj->engine = NVOBJ_ENGINE_SW; + obj->class = class; + *pobj = obj; + return 0; +} + +int +nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct drm_device *dev = chan->dev; + struct nouveau_gpuobj_class *oc; + int ret; + + NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); + + list_for_each_entry(oc, &dev_priv->classes, head) { + struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine]; + + if (oc->id != class) + continue; + + if (!chan->engctx[oc->engine]) { + ret = eng->context_new(chan, oc->engine); + if (ret) + return ret; + } + + return eng->object_new(chan, oc->engine, handle, class); + } + + return -EINVAL; +} + +static int +nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t size; + uint32_t base; + int ret; + + NV_DEBUG(dev, "ch%d\n", chan->id); + + /* Base amount for object storage (4KiB enough?) 
*/ + size = 0x2000; + base = 0; + + if (dev_priv->card_type == NV_50) { + /* Various fixed table thingos */ + size += 0x1400; /* mostly unknown stuff */ + size += 0x4000; /* vm pd */ + base = 0x6000; + /* RAMHT, not sure about setting size yet, 32KiB to be safe */ + size += 0x8000; + /* RAMFC */ + size += 0x1000; + } + + ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin); + if (ret) { + NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret); + return ret; + } + + ret = drm_mm_init(&chan->ramin_heap, base, size - base); + if (ret) { + NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); + nouveau_gpuobj_ref(NULL, &chan->ramin); + return ret; + } + + return 0; +} + +static int +nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm) +{ + struct drm_device *dev = chan->dev; + struct nouveau_gpuobj *pgd = NULL; + struct nouveau_vm_pgd *vpgd; + int ret; + + ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin); + if (ret) + return ret; + + /* create page directory for this vm if none currently exists, + * will be destroyed automagically when last reference to the + * vm is removed + */ + if (list_empty(&vm->pgd_list)) { + ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd); + if (ret) + return ret; + } + nouveau_vm_ref(vm, &chan->vm, pgd); + nouveau_gpuobj_ref(NULL, &pgd); + + /* point channel at vm's page directory */ + vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head); + nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst)); + nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst)); + nv_wo32(chan->ramin, 0x0208, 0xffffffff); + nv_wo32(chan->ramin, 0x020c, 0x000000ff); + + return 0; +} + +int +nouveau_gpuobj_channel_init(struct nouveau_channel *chan, + uint32_t vram_h, uint32_t tt_h) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv); + struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm; + struct nouveau_gpuobj *vram = NULL, *tt = NULL; + int ret; + + NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); + if (dev_priv->card_type >= NV_C0) + return nvc0_gpuobj_channel_init(chan, vm); + + /* Allocate a chunk of memory for per-channel object storage */ + ret = nouveau_gpuobj_channel_init_pramin(chan); + if (ret) { + NV_ERROR(dev, "init pramin\n"); + return ret; + } + + /* NV50 VM + * - Allocate per-channel page-directory + * - Link with shared channel VM + */ + if (vm) { + u32 pgd_offs = (dev_priv->chipset == 0x50) ? 
0x1400 : 0x0200; + u64 vm_vinst = chan->ramin->vinst + pgd_offs; + u32 vm_pinst = chan->ramin->pinst; + + if (vm_pinst != ~0) + vm_pinst += pgd_offs; + + ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000, + 0, &chan->vm_pd); + if (ret) + return ret; + + nouveau_vm_ref(vm, &chan->vm, chan->vm_pd); + } + + /* RAMHT */ + if (dev_priv->card_type < NV_50) { + nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL); + } else { + struct nouveau_gpuobj *ramht = NULL; + + ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16, + NVOBJ_FLAG_ZERO_ALLOC, &ramht); + if (ret) + return ret; + + ret = nouveau_ramht_new(dev, ramht, &chan->ramht); + nouveau_gpuobj_ref(NULL, &ramht); + if (ret) + return ret; + } + + /* VRAM ctxdma */ + if (dev_priv->card_type >= NV_50) { + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + 0, (1ULL << 40), NV_MEM_ACCESS_RW, + NV_MEM_TARGET_VM, &vram); + if (ret) { + NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); + return ret; + } + } else { + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->fb_available_size, + NV_MEM_ACCESS_RW, + NV_MEM_TARGET_VRAM, &vram); + if (ret) { + NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); + return ret; + } + } + + ret = nouveau_ramht_insert(chan, vram_h, vram); + nouveau_gpuobj_ref(NULL, &vram); + if (ret) { + NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret); + return ret; + } + + /* TT memory ctxdma */ + if (dev_priv->card_type >= NV_50) { + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + 0, (1ULL << 40), NV_MEM_ACCESS_RW, + NV_MEM_TARGET_VM, &tt); + } else { + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->gart_info.aper_size, + NV_MEM_ACCESS_RW, + NV_MEM_TARGET_GART, &tt); + } + + if (ret) { + NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret); + return ret; + } + + ret = nouveau_ramht_insert(chan, tt_h, tt); + nouveau_gpuobj_ref(NULL, &tt); + if (ret) { + NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret); + return ret; + } + + return 0; +} + +void +nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) +{ + NV_DEBUG(chan->dev, "ch%d\n", chan->id); + + nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); + nouveau_gpuobj_ref(NULL, &chan->vm_pd); + + if (drm_mm_initialized(&chan->ramin_heap)) + drm_mm_takedown(&chan->ramin_heap); + nouveau_gpuobj_ref(NULL, &chan->ramin); +} + +int +nouveau_gpuobj_suspend(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj; + int i; + + list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { + if (gpuobj->cinst != NVOBJ_CINST_GLOBAL) + continue; + + gpuobj->suspend = vmalloc(gpuobj->size); + if (!gpuobj->suspend) { + nouveau_gpuobj_resume(dev); + return -ENOMEM; + } + + for (i = 0; i < gpuobj->size; i += 4) + gpuobj->suspend[i/4] = nv_ro32(gpuobj, i); + } + + return 0; +} + +void +nouveau_gpuobj_resume(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj; + int i; + + list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { + if (!gpuobj->suspend) + continue; + + for (i = 0; i < gpuobj->size; i += 4) + nv_wo32(gpuobj, i, gpuobj->suspend[i/4]); + + vfree(gpuobj->suspend); + gpuobj->suspend = NULL; + } + + dev_priv->engine.instmem.flush(dev); +} + +int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_grobj_alloc *init = data; + struct nouveau_channel *chan; + int ret; + + if (init->handle == 
~0) + return -EINVAL; + + /* compatibility with userspace that assumes 506e for all chipsets */ + if (init->class == 0x506e) { + init->class = nouveau_software_class(dev); + if (init->class == 0x906e) + return 0; + } else + if (init->class == 0x906e) { + NV_ERROR(dev, "906e not supported yet\n"); + return -EINVAL; + } + + chan = nouveau_channel_get(file_priv, init->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); + + if (nouveau_ramht_find(chan, init->handle)) { + ret = -EEXIST; + goto out; + } + + ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class); + if (ret) { + NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", + ret, init->channel, init->handle); + } + +out: + nouveau_channel_put(&chan); + return ret; +} + +int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_gpuobj_free *objfree = data; + struct nouveau_channel *chan; + int ret; + + chan = nouveau_channel_get(file_priv, objfree->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); + + /* Synchronize with the user channel */ + nouveau_channel_idle(chan); + + ret = nouveau_ramht_remove(chan, objfree->handle); + nouveau_channel_put(&chan); + return ret; +} + +u32 +nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset) +{ + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct drm_device *dev = gpuobj->dev; + unsigned long flags; + + if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { + u64 ptr = gpuobj->vinst + offset; + u32 base = ptr >> 16; + u32 val; + + spin_lock_irqsave(&dev_priv->vm_lock, flags); + if (dev_priv->ramin_base != base) { + dev_priv->ramin_base = base; + nv_wr32(dev, 0x001700, dev_priv->ramin_base); + } + val = nv_rd32(dev, 0x700000 + (ptr & 0xffff)); + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); + return val; + } + + return nv_ri32(dev, gpuobj->pinst + offset); +} + +void +nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val) +{ + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct drm_device *dev = gpuobj->dev; + unsigned long flags; + + if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { + u64 ptr = gpuobj->vinst + offset; + u32 base = ptr >> 16; + + spin_lock_irqsave(&dev_priv->vm_lock, flags); + if (dev_priv->ramin_base != base) { + dev_priv->ramin_base = base; + nv_wr32(dev, 0x001700, dev_priv->ramin_base); + } + nv_wr32(dev, 0x700000 + (ptr & 0xffff), val); + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); + return; + } + + nv_wi32(dev, gpuobj->pinst + offset, val); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c deleted file mode 100644 index b190cc01c820..000000000000 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ /dev/null @@ -1,868 +0,0 @@ -/* - * Copyright (C) 2006 Ben Skeggs. - * - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -/* - * Authors: - * Ben Skeggs - */ - -#include "drmP.h" -#include "drm.h" -#include "nouveau_drv.h" -#include "nouveau_drm.h" -#include "nouveau_fifo.h" -#include "nouveau_ramht.h" -#include "nouveau_software.h" -#include "nouveau_vm.h" - -struct nouveau_gpuobj_method { - struct list_head head; - u32 mthd; - int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data); -}; - -struct nouveau_gpuobj_class { - struct list_head head; - struct list_head methods; - u32 id; - u32 engine; -}; - -int -nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj_class *oc; - - oc = kzalloc(sizeof(*oc), GFP_KERNEL); - if (!oc) - return -ENOMEM; - - INIT_LIST_HEAD(&oc->methods); - oc->id = class; - oc->engine = engine; - list_add(&oc->head, &dev_priv->classes); - return 0; -} - -int -nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd, - int (*exec)(struct nouveau_channel *, u32, u32, u32)) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj_method *om; - struct nouveau_gpuobj_class *oc; - - list_for_each_entry(oc, &dev_priv->classes, head) { - if (oc->id == class) - goto found; - } - - return -EINVAL; - -found: - om = kzalloc(sizeof(*om), GFP_KERNEL); - if (!om) - return -ENOMEM; - - om->mthd = mthd; - om->exec = exec; - list_add(&om->head, &oc->methods); - return 0; -} - -int -nouveau_gpuobj_mthd_call(struct nouveau_channel *chan, - u32 class, u32 mthd, u32 data) -{ - struct drm_nouveau_private *dev_priv = chan->dev->dev_private; - struct nouveau_gpuobj_method *om; - struct nouveau_gpuobj_class *oc; - - list_for_each_entry(oc, &dev_priv->classes, head) { - if (oc->id != class) - continue; - - list_for_each_entry(om, &oc->methods, head) { - if (om->mthd == mthd) - return om->exec(chan, class, mthd, data); - } - } - - return -ENOENT; -} - -int -nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid, - u32 class, u32 mthd, u32 data) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); - struct nouveau_channel *chan = NULL; - unsigned long flags; - int ret = -EINVAL; - - spin_lock_irqsave(&dev_priv->channels.lock, flags); - if (chid >= 0 && chid < pfifo->channels) - chan = dev_priv->channels.ptr[chid]; - if (chan) - ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); - spin_unlock_irqrestore(&dev_priv->channels.lock, flags); - return ret; -} - -int -nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, - uint32_t size, int align, uint32_t flags, - struct nouveau_gpuobj **gpuobj_ret) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; - struct nouveau_gpuobj *gpuobj; - struct drm_mm_node *ramin = NULL; - int ret, i; - - NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", - chan ? 
chan->id : -1, size, align, flags); - - gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); - if (!gpuobj) - return -ENOMEM; - NV_DEBUG(dev, "gpuobj %p\n", gpuobj); - gpuobj->dev = dev; - gpuobj->flags = flags; - kref_init(&gpuobj->refcount); - gpuobj->size = size; - - spin_lock(&dev_priv->ramin_lock); - list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); - spin_unlock(&dev_priv->ramin_lock); - - if (!(flags & NVOBJ_FLAG_VM) && chan) { - ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); - if (ramin) - ramin = drm_mm_get_block(ramin, size, align); - if (!ramin) { - nouveau_gpuobj_ref(NULL, &gpuobj); - return -ENOMEM; - } - - gpuobj->pinst = chan->ramin->pinst; - if (gpuobj->pinst != ~0) - gpuobj->pinst += ramin->start; - - gpuobj->cinst = ramin->start; - gpuobj->vinst = ramin->start + chan->ramin->vinst; - gpuobj->node = ramin; - } else { - ret = instmem->get(gpuobj, chan, size, align); - if (ret) { - nouveau_gpuobj_ref(NULL, &gpuobj); - return ret; - } - - ret = -ENOSYS; - if (!(flags & NVOBJ_FLAG_DONT_MAP)) - ret = instmem->map(gpuobj); - if (ret) - gpuobj->pinst = ~0; - - gpuobj->cinst = NVOBJ_CINST_GLOBAL; - } - - if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { - for (i = 0; i < gpuobj->size; i += 4) - nv_wo32(gpuobj, i, 0); - instmem->flush(dev); - } - - - *gpuobj_ret = gpuobj; - return 0; -} - -int -nouveau_gpuobj_init(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - NV_DEBUG(dev, "\n"); - - INIT_LIST_HEAD(&dev_priv->gpuobj_list); - INIT_LIST_HEAD(&dev_priv->classes); - spin_lock_init(&dev_priv->ramin_lock); - dev_priv->ramin_base = ~0; - - return 0; -} - -void -nouveau_gpuobj_takedown(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj_method *om, *tm; - struct nouveau_gpuobj_class *oc, *tc; - - NV_DEBUG(dev, "\n"); - - list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) { - list_for_each_entry_safe(om, tm, &oc->methods, head) { - list_del(&om->head); - kfree(om); - } - list_del(&oc->head); - kfree(oc); - } - - WARN_ON(!list_empty(&dev_priv->gpuobj_list)); -} - - -static void -nouveau_gpuobj_del(struct kref *ref) -{ - struct nouveau_gpuobj *gpuobj = - container_of(ref, struct nouveau_gpuobj, refcount); - struct drm_device *dev = gpuobj->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; - int i; - - NV_DEBUG(dev, "gpuobj %p\n", gpuobj); - - if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { - for (i = 0; i < gpuobj->size; i += 4) - nv_wo32(gpuobj, i, 0); - instmem->flush(dev); - } - - if (gpuobj->dtor) - gpuobj->dtor(dev, gpuobj); - - if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) { - if (gpuobj->node) { - instmem->unmap(gpuobj); - instmem->put(gpuobj); - } - } else { - if (gpuobj->node) { - spin_lock(&dev_priv->ramin_lock); - drm_mm_put_block(gpuobj->node); - spin_unlock(&dev_priv->ramin_lock); - } - } - - spin_lock(&dev_priv->ramin_lock); - list_del(&gpuobj->list); - spin_unlock(&dev_priv->ramin_lock); - - kfree(gpuobj); -} - -void -nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr) -{ - if (ref) - kref_get(&ref->refcount); - - if (*ptr) - kref_put(&(*ptr)->refcount, nouveau_gpuobj_del); - - *ptr = ref; -} - -int -nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst, - u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *gpuobj = NULL; - int i; - - 
NV_DEBUG(dev, - "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n", - pinst, vinst, size, flags); - - gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); - if (!gpuobj) - return -ENOMEM; - NV_DEBUG(dev, "gpuobj %p\n", gpuobj); - gpuobj->dev = dev; - gpuobj->flags = flags; - kref_init(&gpuobj->refcount); - gpuobj->size = size; - gpuobj->pinst = pinst; - gpuobj->cinst = NVOBJ_CINST_GLOBAL; - gpuobj->vinst = vinst; - - if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { - for (i = 0; i < gpuobj->size; i += 4) - nv_wo32(gpuobj, i, 0); - dev_priv->engine.instmem.flush(dev); - } - - spin_lock(&dev_priv->ramin_lock); - list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); - spin_unlock(&dev_priv->ramin_lock); - *pgpuobj = gpuobj; - return 0; -} - -void -nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class, - u64 base, u64 size, int target, int access, - u32 type, u32 comp) -{ - struct drm_nouveau_private *dev_priv = obj->dev->dev_private; - struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; - u32 flags0; - - flags0 = (comp << 29) | (type << 22) | class; - flags0 |= 0x00100000; - - switch (access) { - case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break; - case NV_MEM_ACCESS_RW: - case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break; - default: - break; - } - - switch (target) { - case NV_MEM_TARGET_VRAM: - flags0 |= 0x00010000; - break; - case NV_MEM_TARGET_PCI: - flags0 |= 0x00020000; - break; - case NV_MEM_TARGET_PCI_NOSNOOP: - flags0 |= 0x00030000; - break; - case NV_MEM_TARGET_GART: - base += dev_priv->gart_info.aper_base; - default: - flags0 &= ~0x00100000; - break; - } - - /* convert to base + limit */ - size = (base + size) - 1; - - nv_wo32(obj, offset + 0x00, flags0); - nv_wo32(obj, offset + 0x04, lower_32_bits(size)); - nv_wo32(obj, offset + 0x08, lower_32_bits(base)); - nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 | - upper_32_bits(base)); - nv_wo32(obj, offset + 0x10, 0x00000000); - nv_wo32(obj, offset + 0x14, 0x00000000); - - pinstmem->flush(obj->dev); -} - -int -nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size, - int target, int access, u32 type, u32 comp, - struct nouveau_gpuobj **pobj) -{ - struct drm_device *dev = chan->dev; - int ret; - - ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj); - if (ret) - return ret; - - nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target, - access, type, comp); - return 0; -} - -int -nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, - u64 size, int access, int target, - struct nouveau_gpuobj **pobj) -{ - struct drm_nouveau_private *dev_priv = chan->dev->dev_private; - struct drm_device *dev = chan->dev; - struct nouveau_gpuobj *obj; - u32 flags0, flags2; - int ret; - - if (dev_priv->card_type >= NV_50) { - u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0; - u32 type = (target == NV_MEM_TARGET_VM) ? 
NV_MEM_TYPE_VM : 0; - - return nv50_gpuobj_dma_new(chan, class, base, size, - target, access, type, comp, pobj); - } - - if (target == NV_MEM_TARGET_GART) { - struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma; - - if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) { - if (base == 0) { - nouveau_gpuobj_ref(gart, pobj); - return 0; - } - - base = nouveau_sgdma_get_physical(dev, base); - target = NV_MEM_TARGET_PCI; - } else { - base += dev_priv->gart_info.aper_base; - if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) - target = NV_MEM_TARGET_PCI_NOSNOOP; - else - target = NV_MEM_TARGET_PCI; - } - } - - flags0 = class; - flags0 |= 0x00003000; /* PT present, PT linear */ - flags2 = 0; - - switch (target) { - case NV_MEM_TARGET_PCI: - flags0 |= 0x00020000; - break; - case NV_MEM_TARGET_PCI_NOSNOOP: - flags0 |= 0x00030000; - break; - default: - break; - } - - switch (access) { - case NV_MEM_ACCESS_RO: - flags0 |= 0x00004000; - break; - case NV_MEM_ACCESS_WO: - flags0 |= 0x00008000; - default: - flags2 |= 0x00000002; - break; - } - - flags0 |= (base & 0x00000fff) << 20; - flags2 |= (base & 0xfffff000); - - ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj); - if (ret) - return ret; - - nv_wo32(obj, 0x00, flags0); - nv_wo32(obj, 0x04, size - 1); - nv_wo32(obj, 0x08, flags2); - nv_wo32(obj, 0x0c, flags2); - - obj->engine = NVOBJ_ENGINE_SW; - obj->class = class; - *pobj = obj; - return 0; -} - -int -nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class) -{ - struct drm_nouveau_private *dev_priv = chan->dev->dev_private; - struct drm_device *dev = chan->dev; - struct nouveau_gpuobj_class *oc; - int ret; - - NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); - - list_for_each_entry(oc, &dev_priv->classes, head) { - struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine]; - - if (oc->id != class) - continue; - - if (!chan->engctx[oc->engine]) { - ret = eng->context_new(chan, oc->engine); - if (ret) - return ret; - } - - return eng->object_new(chan, oc->engine, handle, class); - } - - return -EINVAL; -} - -static int -nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) -{ - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t size; - uint32_t base; - int ret; - - NV_DEBUG(dev, "ch%d\n", chan->id); - - /* Base amount for object storage (4KiB enough?) 
*/ - size = 0x2000; - base = 0; - - if (dev_priv->card_type == NV_50) { - /* Various fixed table thingos */ - size += 0x1400; /* mostly unknown stuff */ - size += 0x4000; /* vm pd */ - base = 0x6000; - /* RAMHT, not sure about setting size yet, 32KiB to be safe */ - size += 0x8000; - /* RAMFC */ - size += 0x1000; - } - - ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin); - if (ret) { - NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret); - return ret; - } - - ret = drm_mm_init(&chan->ramin_heap, base, size - base); - if (ret) { - NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); - nouveau_gpuobj_ref(NULL, &chan->ramin); - return ret; - } - - return 0; -} - -static int -nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm) -{ - struct drm_device *dev = chan->dev; - struct nouveau_gpuobj *pgd = NULL; - struct nouveau_vm_pgd *vpgd; - int ret; - - ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin); - if (ret) - return ret; - - /* create page directory for this vm if none currently exists, - * will be destroyed automagically when last reference to the - * vm is removed - */ - if (list_empty(&vm->pgd_list)) { - ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd); - if (ret) - return ret; - } - nouveau_vm_ref(vm, &chan->vm, pgd); - nouveau_gpuobj_ref(NULL, &pgd); - - /* point channel at vm's page directory */ - vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head); - nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst)); - nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst)); - nv_wo32(chan->ramin, 0x0208, 0xffffffff); - nv_wo32(chan->ramin, 0x020c, 0x000000ff); - - return 0; -} - -int -nouveau_gpuobj_channel_init(struct nouveau_channel *chan, - uint32_t vram_h, uint32_t tt_h) -{ - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv); - struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm; - struct nouveau_gpuobj *vram = NULL, *tt = NULL; - int ret; - - NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); - if (dev_priv->card_type >= NV_C0) - return nvc0_gpuobj_channel_init(chan, vm); - - /* Allocate a chunk of memory for per-channel object storage */ - ret = nouveau_gpuobj_channel_init_pramin(chan); - if (ret) { - NV_ERROR(dev, "init pramin\n"); - return ret; - } - - /* NV50 VM - * - Allocate per-channel page-directory - * - Link with shared channel VM - */ - if (vm) { - u32 pgd_offs = (dev_priv->chipset == 0x50) ? 
0x1400 : 0x0200; - u64 vm_vinst = chan->ramin->vinst + pgd_offs; - u32 vm_pinst = chan->ramin->pinst; - - if (vm_pinst != ~0) - vm_pinst += pgd_offs; - - ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000, - 0, &chan->vm_pd); - if (ret) - return ret; - - nouveau_vm_ref(vm, &chan->vm, chan->vm_pd); - } - - /* RAMHT */ - if (dev_priv->card_type < NV_50) { - nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL); - } else { - struct nouveau_gpuobj *ramht = NULL; - - ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16, - NVOBJ_FLAG_ZERO_ALLOC, &ramht); - if (ret) - return ret; - - ret = nouveau_ramht_new(dev, ramht, &chan->ramht); - nouveau_gpuobj_ref(NULL, &ramht); - if (ret) - return ret; - } - - /* VRAM ctxdma */ - if (dev_priv->card_type >= NV_50) { - ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - 0, (1ULL << 40), NV_MEM_ACCESS_RW, - NV_MEM_TARGET_VM, &vram); - if (ret) { - NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); - return ret; - } - } else { - ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - 0, dev_priv->fb_available_size, - NV_MEM_ACCESS_RW, - NV_MEM_TARGET_VRAM, &vram); - if (ret) { - NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); - return ret; - } - } - - ret = nouveau_ramht_insert(chan, vram_h, vram); - nouveau_gpuobj_ref(NULL, &vram); - if (ret) { - NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret); - return ret; - } - - /* TT memory ctxdma */ - if (dev_priv->card_type >= NV_50) { - ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - 0, (1ULL << 40), NV_MEM_ACCESS_RW, - NV_MEM_TARGET_VM, &tt); - } else { - ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - 0, dev_priv->gart_info.aper_size, - NV_MEM_ACCESS_RW, - NV_MEM_TARGET_GART, &tt); - } - - if (ret) { - NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret); - return ret; - } - - ret = nouveau_ramht_insert(chan, tt_h, tt); - nouveau_gpuobj_ref(NULL, &tt); - if (ret) { - NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret); - return ret; - } - - return 0; -} - -void -nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) -{ - NV_DEBUG(chan->dev, "ch%d\n", chan->id); - - nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); - nouveau_gpuobj_ref(NULL, &chan->vm_pd); - - if (drm_mm_initialized(&chan->ramin_heap)) - drm_mm_takedown(&chan->ramin_heap); - nouveau_gpuobj_ref(NULL, &chan->ramin); -} - -int -nouveau_gpuobj_suspend(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *gpuobj; - int i; - - list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { - if (gpuobj->cinst != NVOBJ_CINST_GLOBAL) - continue; - - gpuobj->suspend = vmalloc(gpuobj->size); - if (!gpuobj->suspend) { - nouveau_gpuobj_resume(dev); - return -ENOMEM; - } - - for (i = 0; i < gpuobj->size; i += 4) - gpuobj->suspend[i/4] = nv_ro32(gpuobj, i); - } - - return 0; -} - -void -nouveau_gpuobj_resume(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *gpuobj; - int i; - - list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { - if (!gpuobj->suspend) - continue; - - for (i = 0; i < gpuobj->size; i += 4) - nv_wo32(gpuobj, i, gpuobj->suspend[i/4]); - - vfree(gpuobj->suspend); - gpuobj->suspend = NULL; - } - - dev_priv->engine.instmem.flush(dev); -} - -int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_nouveau_grobj_alloc *init = data; - struct nouveau_channel *chan; - int ret; - - if (init->handle == 
~0) - return -EINVAL; - - /* compatibility with userspace that assumes 506e for all chipsets */ - if (init->class == 0x506e) { - init->class = nouveau_software_class(dev); - if (init->class == 0x906e) - return 0; - } else - if (init->class == 0x906e) { - NV_ERROR(dev, "906e not supported yet\n"); - return -EINVAL; - } - - chan = nouveau_channel_get(file_priv, init->channel); - if (IS_ERR(chan)) - return PTR_ERR(chan); - - if (nouveau_ramht_find(chan, init->handle)) { - ret = -EEXIST; - goto out; - } - - ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class); - if (ret) { - NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", - ret, init->channel, init->handle); - } - -out: - nouveau_channel_put(&chan); - return ret; -} - -int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_nouveau_gpuobj_free *objfree = data; - struct nouveau_channel *chan; - int ret; - - chan = nouveau_channel_get(file_priv, objfree->channel); - if (IS_ERR(chan)) - return PTR_ERR(chan); - - /* Synchronize with the user channel */ - nouveau_channel_idle(chan); - - ret = nouveau_ramht_remove(chan, objfree->handle); - nouveau_channel_put(&chan); - return ret; -} - -u32 -nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset) -{ - struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; - struct drm_device *dev = gpuobj->dev; - unsigned long flags; - - if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { - u64 ptr = gpuobj->vinst + offset; - u32 base = ptr >> 16; - u32 val; - - spin_lock_irqsave(&dev_priv->vm_lock, flags); - if (dev_priv->ramin_base != base) { - dev_priv->ramin_base = base; - nv_wr32(dev, 0x001700, dev_priv->ramin_base); - } - val = nv_rd32(dev, 0x700000 + (ptr & 0xffff)); - spin_unlock_irqrestore(&dev_priv->vm_lock, flags); - return val; - } - - return nv_ri32(dev, gpuobj->pinst + offset); -} - -void -nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val) -{ - struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; - struct drm_device *dev = gpuobj->dev; - unsigned long flags; - - if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { - u64 ptr = gpuobj->vinst + offset; - u32 base = ptr >> 16; - - spin_lock_irqsave(&dev_priv->vm_lock, flags); - if (dev_priv->ramin_base != base) { - dev_priv->ramin_base = base; - nv_wr32(dev, 0x001700, dev_priv->ramin_base); - } - nv_wr32(dev, 0x700000 + (ptr & 0xffff), val); - spin_unlock_irqrestore(&dev_priv->vm_lock, flags); - return; - } - - nv_wi32(dev, gpuobj->pinst + offset, val); -} -- cgit v1.2.3 From 2a259a3d84c4409918c5d094f0969da58283a947 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 8 May 2012 10:24:27 +1000 Subject: drm/nouveau: mark most of our ioctls as deprecated, move to compat layer These will be replaced in the near future, the code isn't yet stable enough for this merge window however. 
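To make the shape of the move easier to follow than the diffstat alone, here is a condensed sketch of the compat-layer pattern this series applies: the legacy entry points share one argument list through a macro, live together in a single abi16 file, and the driver's ioctl table keeps the old ioctl numbers but points straight at the compat handlers. This is only an illustration distilled from the diff below, not extra code; the example_ identifiers are invented for the sketch.

/* Condensed sketch of the abi16 compat pattern (see the full diff below);
 * the example_ names are hypothetical, the macros are the ones the patch adds. */
#define ABI16_IOCTL_ARGS \
	struct drm_device *dev, void *data, struct drm_file *file_priv

/* Deprecated ioctl kept only so old userspace still has an entry point;
 * it now rejects every parameter. */
static int example_abi16_setparam(ABI16_IOCTL_ARGS)
{
	return -EINVAL;
}

/* Same DRM ioctl numbers as before, routed to the compat handlers. */
static struct drm_ioctl_desc example_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, example_abi16_setparam,
			  DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
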
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 1 + drivers/gpu/drm/nouveau/nouveau_abi16.c | 245 +++++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_abi16.h | 83 ++++++++++ drivers/gpu/drm/nouveau/nouveau_channel.c | 95 ----------- drivers/gpu/drm/nouveau/nouveau_drv.c | 18 ++- drivers/gpu/drm/nouveau/nouveau_drv.h | 14 -- drivers/gpu/drm/nouveau/nouveau_gpuobj.c | 60 ------- drivers/gpu/drm/nouveau/nouveau_notifier.c | 23 --- drivers/gpu/drm/nouveau/nouveau_state.c | 74 --------- include/drm/nouveau_drm.h | 94 +---------- 10 files changed, 353 insertions(+), 354 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nouveau_abi16.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_abi16.h diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index efa1aef35f3a..1cece6a78f39 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -12,6 +12,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \ nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \ nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \ + nouveau_abi16.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \ diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c new file mode 100644 index 000000000000..ff23d88880e5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -0,0 +1,245 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_dma.h" +#include "nouveau_abi16.h" +#include "nouveau_ramht.h" +#include "nouveau_software.h" + +int +nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_getparam *getparam = data; + + switch (getparam->param) { + case NOUVEAU_GETPARAM_CHIPSET_ID: + getparam->value = dev_priv->chipset; + break; + case NOUVEAU_GETPARAM_PCI_VENDOR: + getparam->value = dev->pci_vendor; + break; + case NOUVEAU_GETPARAM_PCI_DEVICE: + getparam->value = dev->pci_device; + break; + case NOUVEAU_GETPARAM_BUS_TYPE: + if (drm_pci_device_is_agp(dev)) + getparam->value = 0; + else + if (!pci_is_pcie(dev->pdev)) + getparam->value = 1; + else + getparam->value = 2; + break; + case NOUVEAU_GETPARAM_FB_SIZE: + getparam->value = dev_priv->fb_available_size; + break; + case NOUVEAU_GETPARAM_AGP_SIZE: + getparam->value = dev_priv->gart_info.aper_size; + break; + case NOUVEAU_GETPARAM_VM_VRAM_BASE: + getparam->value = 0; /* deprecated */ + break; + case NOUVEAU_GETPARAM_PTIMER_TIME: + getparam->value = dev_priv->engine.timer.read(dev); + break; + case NOUVEAU_GETPARAM_HAS_BO_USAGE: + getparam->value = 1; + break; + case NOUVEAU_GETPARAM_HAS_PAGEFLIP: + getparam->value = 1; + break; + case NOUVEAU_GETPARAM_GRAPH_UNITS: + /* NV40 and NV50 versions are quite different, but register + * address is the same. User is supposed to know the card + * family anyway... */ + if (dev_priv->chipset >= 0x40) { + getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS); + break; + } + /* FALLTHRU */ + default: + NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param); + return -EINVAL; + } + + return 0; +} + +int +nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS) +{ + return -EINVAL; +} + +int +nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_channel_alloc *init = data; + struct nouveau_channel *chan; + int ret; + + if (!dev_priv->eng[NVOBJ_ENGINE_GR]) + return -ENODEV; + + if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) + return -EINVAL; + + ret = nouveau_channel_alloc(dev, &chan, file_priv, + init->fb_ctxdma_handle, + init->tt_ctxdma_handle); + if (ret) + return ret; + init->channel = chan->id; + + if (nouveau_vram_pushbuf == 0) { + if (chan->dma.ib_max) + init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | + NOUVEAU_GEM_DOMAIN_GART; + else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM) + init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; + else + init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; + } else { + init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; + } + + if (dev_priv->card_type < NV_C0) { + init->subchan[0].handle = 0x00000000; + init->subchan[0].grclass = 0x0000; + init->subchan[1].handle = NvSw; + init->subchan[1].grclass = NV_SW; + init->nr_subchan = 2; + } + + /* Named memory object area */ + ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, + &init->notifier_handle); + + if (ret == 0) + atomic_inc(&chan->users); /* userspace reference */ + nouveau_channel_put(&chan); + return ret; +} + +int +nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) +{ + struct drm_nouveau_channel_free *req = data; + struct nouveau_channel *chan; + + chan = nouveau_channel_get(file_priv, req->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); + + list_del(&chan->list); + atomic_dec(&chan->users); + nouveau_channel_put(&chan); + return 0; +} + +int 
+nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) +{ + struct drm_nouveau_grobj_alloc *init = data; + struct nouveau_channel *chan; + int ret; + + if (init->handle == ~0) + return -EINVAL; + + /* compatibility with userspace that assumes 506e for all chipsets */ + if (init->class == 0x506e) { + init->class = nouveau_software_class(dev); + if (init->class == 0x906e) + return 0; + } else + if (init->class == 0x906e) { + NV_ERROR(dev, "906e not supported yet\n"); + return -EINVAL; + } + + chan = nouveau_channel_get(file_priv, init->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); + + if (nouveau_ramht_find(chan, init->handle)) { + ret = -EEXIST; + goto out; + } + + ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class); + if (ret) { + NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", + ret, init->channel, init->handle); + } + +out: + nouveau_channel_put(&chan); + return ret; +} + +int +nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_notifierobj_alloc *na = data; + struct nouveau_channel *chan; + int ret; + + /* completely unnecessary for these chipsets... */ + if (unlikely(dev_priv->card_type >= NV_C0)) + return -EINVAL; + + chan = nouveau_channel_get(file_priv, na->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); + + ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, + &na->offset); + nouveau_channel_put(&chan); + return ret; +} + +int +nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) +{ + struct drm_nouveau_gpuobj_free *objfree = data; + struct nouveau_channel *chan; + int ret; + + chan = nouveau_channel_get(file_priv, objfree->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); + + /* Synchronize with the user channel */ + nouveau_channel_idle(chan); + + ret = nouveau_ramht_remove(chan, objfree->handle); + nouveau_channel_put(&chan); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h new file mode 100644 index 000000000000..e6328b008a8c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h @@ -0,0 +1,83 @@ +#ifndef __NOUVEAU_ABI16_H__ +#define __NOUVEAU_ABI16_H__ + +#define ABI16_IOCTL_ARGS \ + struct drm_device *dev, void *data, struct drm_file *file_priv +int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS); +int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS); +int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS); +int nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS); +int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS); +int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS); +int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS); + +struct drm_nouveau_channel_alloc { + uint32_t fb_ctxdma_handle; + uint32_t tt_ctxdma_handle; + + int channel; + uint32_t pushbuf_domains; + + /* Notifier memory */ + uint32_t notifier_handle; + + /* DRM-enforced subchannel assignments */ + struct { + uint32_t handle; + uint32_t grclass; + } subchan[8]; + uint32_t nr_subchan; +}; + +struct drm_nouveau_channel_free { + int channel; +}; + +struct drm_nouveau_grobj_alloc { + int channel; + uint32_t handle; + int class; +}; + +struct drm_nouveau_notifierobj_alloc { + uint32_t channel; + uint32_t handle; + uint32_t size; + uint32_t offset; +}; + +struct drm_nouveau_gpuobj_free { + int channel; + uint32_t handle; +}; + +#define NOUVEAU_GETPARAM_PCI_VENDOR 3 +#define NOUVEAU_GETPARAM_PCI_DEVICE 4 +#define NOUVEAU_GETPARAM_BUS_TYPE 5 +#define NOUVEAU_GETPARAM_FB_SIZE 8 +#define NOUVEAU_GETPARAM_AGP_SIZE 9 
+#define NOUVEAU_GETPARAM_CHIPSET_ID 11 +#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 +#define NOUVEAU_GETPARAM_GRAPH_UNITS 13 +#define NOUVEAU_GETPARAM_PTIMER_TIME 14 +#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15 +#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16 +struct drm_nouveau_getparam { + uint64_t param; + uint64_t value; +}; + +struct drm_nouveau_setparam { + uint64_t param; + uint64_t value; +}; + +#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam) +#define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam) +#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc) +#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free) +#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc) +#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc) +#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free) + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 629d8a2df5bd..debd90225a88 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -395,98 +395,3 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) nouveau_channel_put(&chan); } } - - -/*********************************** - * ioctls wrapping the functions - ***********************************/ - -static int -nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct drm_nouveau_channel_alloc *init = data; - struct nouveau_channel *chan; - int ret; - - if (!dev_priv->eng[NVOBJ_ENGINE_GR]) - return -ENODEV; - - if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) - return -EINVAL; - - ret = nouveau_channel_alloc(dev, &chan, file_priv, - init->fb_ctxdma_handle, - init->tt_ctxdma_handle); - if (ret) - return ret; - init->channel = chan->id; - - if (nouveau_vram_pushbuf == 0) { - if (chan->dma.ib_max) - init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | - NOUVEAU_GEM_DOMAIN_GART; - else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM) - init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; - else - init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; - } else { - init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; - } - - if (dev_priv->card_type < NV_C0) { - init->subchan[0].handle = 0x00000000; - init->subchan[0].grclass = 0x0000; - init->subchan[1].handle = NvSw; - init->subchan[1].grclass = NV_SW; - init->nr_subchan = 2; - } - - /* Named memory object area */ - ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, - &init->notifier_handle); - - if (ret == 0) - atomic_inc(&chan->users); /* userspace reference */ - nouveau_channel_put(&chan); - return ret; -} - -static int -nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_nouveau_channel_free *req = data; - struct nouveau_channel *chan; - - chan = nouveau_channel_get(file_priv, req->channel); - if (IS_ERR(chan)) - return PTR_ERR(chan); - - list_del(&chan->list); - atomic_dec(&chan->users); - nouveau_channel_put(&chan); - return 0; -} - 
-/*********************************** - * finally, the ioctl table - ***********************************/ - -struct drm_ioctl_desc nouveau_ioctls[] = { - DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), -}; - -int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index cad254c8e387..727447d296f1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -29,6 +29,7 @@ #include "drm.h" #include "drm_crtc_helper.h" #include "nouveau_drv.h" +#include "nouveau_abi16.h" #include "nouveau_hw.h" #include "nouveau_fb.h" #include "nouveau_fbcon.h" @@ -384,6 +385,21 @@ nouveau_pci_resume(struct pci_dev *pdev) return 0; } +static struct drm_ioctl_desc nouveau_ioctls[] = { + DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), +}; + static const struct file_operations nouveau_driver_fops = { .owner = THIS_MODULE, .open = drm_open, @@ -463,7 +479,7 @@ static struct pci_driver nouveau_pci_driver = { static int __init nouveau_init(void) { - driver.num_ioctls = nouveau_max_ioctl; + driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls); if (nouveau_modeset == -1) { #ifdef CONFIG_VGA_CONSOLE diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index b0f8dd0373cd..a5dc98495125 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -872,10 +872,6 @@ extern int nouveau_load(struct drm_device *, unsigned long flags); extern int nouveau_firstopen(struct drm_device *); extern void nouveau_lastclose(struct drm_device *); extern int nouveau_unload(struct drm_device *); -extern int nouveau_ioctl_getparam(struct drm_device *, void *data, - struct drm_file *); -extern int nouveau_ioctl_setparam(struct drm_device *, void *data, - struct drm_file *); extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout, uint32_t reg, uint32_t mask, uint32_t val); extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout, @@ -915,14 +911,8 @@ extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, int cout, uint32_t start, uint32_t end, uint32_t *offset); extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); -extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, - struct drm_file *); -extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data, - struct drm_file *); /* nouveau_channel.c */ -extern struct drm_ioctl_desc nouveau_ioctls[]; -extern int nouveau_max_ioctl; extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *); extern int nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan, @@ -993,10 +983,6 @@ extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base, extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset, int class, u64 base, u64 size, int target, int access, u32 type, u32 comp); -extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, - struct drm_file *); -extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, - struct drm_file *); /* nouveau_irq.c */ extern int nouveau_irq_init(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c index b190cc01c820..bd79fedb7054 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c +++ b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c @@ -758,66 +758,6 @@ nouveau_gpuobj_resume(struct drm_device *dev) dev_priv->engine.instmem.flush(dev); } -int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_nouveau_grobj_alloc *init = data; - struct nouveau_channel *chan; - int ret; - - if (init->handle == ~0) - return -EINVAL; - - /* compatibility with userspace that assumes 506e for all chipsets */ - if (init->class == 0x506e) { - init->class = nouveau_software_class(dev); - if (init->class == 0x906e) - return 0; - } else - if (init->class == 0x906e) { - NV_ERROR(dev, "906e not supported yet\n"); - return -EINVAL; - } - - chan = nouveau_channel_get(file_priv, init->channel); - if (IS_ERR(chan)) - return PTR_ERR(chan); - - if (nouveau_ramht_find(chan, init->handle)) { - ret = -EEXIST; - goto out; - } - - ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class); - if (ret) { - NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", - ret, init->channel, init->handle); - } - -out: - nouveau_channel_put(&chan); - return ret; -} - -int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_nouveau_gpuobj_free *objfree = data; - struct nouveau_channel *chan; - int ret; - - chan = nouveau_channel_get(file_priv, objfree->channel); - if (IS_ERR(chan)) - return PTR_ERR(chan); - - /* Synchronize with the user channel */ - nouveau_channel_idle(chan); - - ret = 
nouveau_ramht_remove(chan, objfree->handle); - nouveau_channel_put(&chan); - return ret; -} - u32 nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset) { diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 2ef883c4bbc1..aa549155dcc1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -179,26 +179,3 @@ nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset) return 0; } - -int -nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct drm_nouveau_notifierobj_alloc *na = data; - struct nouveau_channel *chan; - int ret; - - /* completely unnecessary for these chipsets... */ - if (unlikely(dev_priv->card_type >= NV_C0)) - return -EINVAL; - - chan = nouveau_channel_get(file_priv, na->channel); - if (IS_ERR(chan)) - return PTR_ERR(chan); - - ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, - &na->offset); - nouveau_channel_put(&chan); - return ret; -} diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 19706f0532ea..1cdfd6e757ce 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -1234,80 +1234,6 @@ int nouveau_unload(struct drm_device *dev) return 0; } -int nouveau_ioctl_getparam(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct drm_nouveau_getparam *getparam = data; - - switch (getparam->param) { - case NOUVEAU_GETPARAM_CHIPSET_ID: - getparam->value = dev_priv->chipset; - break; - case NOUVEAU_GETPARAM_PCI_VENDOR: - getparam->value = dev->pci_vendor; - break; - case NOUVEAU_GETPARAM_PCI_DEVICE: - getparam->value = dev->pci_device; - break; - case NOUVEAU_GETPARAM_BUS_TYPE: - if (drm_pci_device_is_agp(dev)) - getparam->value = NV_AGP; - else if (pci_is_pcie(dev->pdev)) - getparam->value = NV_PCIE; - else - getparam->value = NV_PCI; - break; - case NOUVEAU_GETPARAM_FB_SIZE: - getparam->value = dev_priv->fb_available_size; - break; - case NOUVEAU_GETPARAM_AGP_SIZE: - getparam->value = dev_priv->gart_info.aper_size; - break; - case NOUVEAU_GETPARAM_VM_VRAM_BASE: - getparam->value = 0; /* deprecated */ - break; - case NOUVEAU_GETPARAM_PTIMER_TIME: - getparam->value = dev_priv->engine.timer.read(dev); - break; - case NOUVEAU_GETPARAM_HAS_BO_USAGE: - getparam->value = 1; - break; - case NOUVEAU_GETPARAM_HAS_PAGEFLIP: - getparam->value = 1; - break; - case NOUVEAU_GETPARAM_GRAPH_UNITS: - /* NV40 and NV50 versions are quite different, but register - * address is the same. User is supposed to know the card - * family anyway... 
*/ - if (dev_priv->chipset >= 0x40) { - getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS); - break; - } - /* FALLTHRU */ - default: - NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param); - return -EINVAL; - } - - return 0; -} - -int -nouveau_ioctl_setparam(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_nouveau_setparam *setparam = data; - - switch (setparam->param) { - default: - NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param); - return -EINVAL; - } - - return 0; -} - /* Wait until (value(reg) & mask) == val, up until timeout has hit */ bool nouveau_wait_eq(struct drm_device *dev, uint64_t timeout, diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index 5edd3a76fffa..2a5769fdf8ba 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h @@ -25,70 +25,6 @@ #ifndef __NOUVEAU_DRM_H__ #define __NOUVEAU_DRM_H__ -#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16 - -struct drm_nouveau_channel_alloc { - uint32_t fb_ctxdma_handle; - uint32_t tt_ctxdma_handle; - - int channel; - uint32_t pushbuf_domains; - - /* Notifier memory */ - uint32_t notifier_handle; - - /* DRM-enforced subchannel assignments */ - struct { - uint32_t handle; - uint32_t grclass; - } subchan[8]; - uint32_t nr_subchan; -}; - -struct drm_nouveau_channel_free { - int channel; -}; - -struct drm_nouveau_grobj_alloc { - int channel; - uint32_t handle; - int class; -}; - -struct drm_nouveau_notifierobj_alloc { - uint32_t channel; - uint32_t handle; - uint32_t size; - uint32_t offset; -}; - -struct drm_nouveau_gpuobj_free { - int channel; - uint32_t handle; -}; - -/* FIXME : maybe unify {GET,SET}PARAMs */ -#define NOUVEAU_GETPARAM_PCI_VENDOR 3 -#define NOUVEAU_GETPARAM_PCI_DEVICE 4 -#define NOUVEAU_GETPARAM_BUS_TYPE 5 -#define NOUVEAU_GETPARAM_FB_SIZE 8 -#define NOUVEAU_GETPARAM_AGP_SIZE 9 -#define NOUVEAU_GETPARAM_CHIPSET_ID 11 -#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 -#define NOUVEAU_GETPARAM_GRAPH_UNITS 13 -#define NOUVEAU_GETPARAM_PTIMER_TIME 14 -#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15 -#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16 -struct drm_nouveau_getparam { - uint64_t param; - uint64_t value; -}; - -struct drm_nouveau_setparam { - uint64_t param; - uint64_t value; -}; - #define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) #define NOUVEAU_GEM_DOMAIN_GART (1 << 2) @@ -180,35 +116,19 @@ struct drm_nouveau_gem_cpu_fini { uint32_t handle; }; -enum nouveau_bus_type { - NV_AGP = 0, - NV_PCI = 1, - NV_PCIE = 2, -}; - -struct drm_nouveau_sarea { -}; - -#define DRM_NOUVEAU_GETPARAM 0x00 -#define DRM_NOUVEAU_SETPARAM 0x01 -#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 -#define DRM_NOUVEAU_CHANNEL_FREE 0x03 -#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 -#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 -#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 +#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */ +#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */ +#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */ +#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */ +#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */ +#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */ +#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */ #define DRM_NOUVEAU_GEM_NEW 0x40 #define DRM_NOUVEAU_GEM_PUSHBUF 0x41 #define DRM_NOUVEAU_GEM_CPU_PREP 0x42 #define DRM_NOUVEAU_GEM_CPU_FINI 0x43 #define DRM_NOUVEAU_GEM_INFO 0x44 -#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam) -#define DRM_IOCTL_NOUVEAU_SETPARAM 
DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam) -#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc) -#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free) -#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc) -#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc) -#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free) #define DRM_IOCTL_NOUVEAU_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new) #define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf) #define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep) -- cgit v1.2.3 From 0ade74b6c6843892b52e6c2a4ac02183242eec27 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 8 May 2012 11:22:27 +1000 Subject: drm/nv50: extend vblank semaphore to generic dmaobj + offset pair Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 - drivers/gpu/drm/nouveau/nouveau_notifier.c | 18 ------------------ drivers/gpu/drm/nouveau/nouveau_software.h | 22 ++++------------------ drivers/gpu/drm/nouveau/nv50_display.c | 25 ++++++++++++++++++++++++- drivers/gpu/drm/nouveau/nv50_software.c | 19 ++++--------------- 5 files changed, 32 insertions(+), 53 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index a5dc98495125..03da38373552 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -910,7 +910,6 @@ extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, int cout, uint32_t start, uint32_t end, uint32_t *offset); -extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); /* nouveau_channel.c */ extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *); diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index aa549155dcc1..69c93b864519 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -161,21 +161,3 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, *b_offset = mem->start; return 0; } - -int -nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset) -{ - if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor) - return -EINVAL; - - if (poffset) { - struct drm_mm_node *mem = nobj->priv; - - if (*poffset >= mem->size) - return false; - - *poffset += mem->start; - } - - return 0; -} diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h index e60bc6ce9003..9914cf154f65 100644 --- a/drivers/gpu/drm/nouveau/nouveau_software.h +++ b/drivers/gpu/drm/nouveau/nouveau_software.h @@ -4,36 +4,21 @@ struct nouveau_software_priv { struct nouveau_exec_engine base; struct list_head vblank; + spinlock_t peephole_lock; }; struct nouveau_software_chan { struct list_head flip; struct { struct list_head list; - struct nouveau_bo *bo; + u32 channel; + u32 ctxdma; u32 offset; u32 value; u32 head; } 
vblank; }; -static inline void -nouveau_software_vblank(struct drm_device *dev, int crtc) -{ - struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW); - struct nouveau_software_chan *pch, *tmp; - - list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) { - if (pch->vblank.head != crtc) - continue; - - nouveau_bo_wr32(pch->vblank.bo, pch->vblank.offset, - pch->vblank.value); - list_del(&pch->vblank.list); - drm_vblank_put(dev, crtc); - } -} - static inline void nouveau_software_context_new(struct nouveau_software_chan *pch) { @@ -44,6 +29,7 @@ static inline void nouveau_software_create(struct nouveau_software_priv *psw) { INIT_LIST_HEAD(&psw->vblank); + spin_lock_init(&psw->peephole_lock); } static inline u16 diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 5c41612723b4..b244d9968c5d 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -646,7 +646,30 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb, static void nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc) { - nouveau_software_vblank(dev, crtc); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW); + struct nouveau_software_chan *pch, *tmp; + + list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) { + if (pch->vblank.head != crtc) + continue; + + spin_lock(&psw->peephole_lock); + nv_wr32(dev, 0x001704, pch->vblank.channel); + nv_wr32(dev, 0x001710, 0x80000000 | pch->vblank.ctxdma); + if (dev_priv->chipset == 0x50) { + nv_wr32(dev, 0x001570, pch->vblank.offset); + nv_wr32(dev, 0x001574, pch->vblank.value); + } else { + nv_wr32(dev, 0x060010, pch->vblank.offset); + nv_wr32(dev, 0x060014, pch->vblank.value); + } + spin_unlock(&psw->peephole_lock); + + list_del(&pch->vblank.list); + drm_vblank_put(dev, crtc); + } + drm_handle_vblank(dev, crtc); } diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c index 114d2517d4a8..df554d9dacb8 100644 --- a/drivers/gpu/drm/nouveau/nv50_software.c +++ b/drivers/gpu/drm/nouveau/nv50_software.c @@ -36,9 +36,6 @@ struct nv50_software_priv { struct nv50_software_chan { struct nouveau_software_chan base; - struct { - struct nouveau_gpuobj *object; - } vblank; }; static int @@ -51,11 +48,7 @@ mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data) if (!gpuobj) return -ENOENT; - if (nouveau_notifier_offset(gpuobj, NULL)) - return -EINVAL; - - pch->vblank.object = gpuobj; - pch->base.vblank.offset = ~0; + pch->base.vblank.ctxdma = gpuobj->cinst >> 4; return 0; } @@ -63,11 +56,7 @@ static int mthd_vblsem_offset(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data) { struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW]; - - if (nouveau_notifier_offset(pch->vblank.object, &data)) - return -ERANGE; - - pch->base.vblank.offset = data >> 2; + pch->base.vblank.offset = data; return 0; } @@ -86,7 +75,7 @@ mthd_vblsem_release(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data) struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW]; struct drm_device *dev = chan->dev; - if (!pch->vblank.object || pch->base.vblank.offset == ~0 || data > 1) + if (data > 1) return -EINVAL; drm_vblank_get(dev, data); @@ -116,7 +105,7 @@ nv50_software_context_new(struct nouveau_channel *chan, int engine) return -ENOMEM; nouveau_software_context_new(&pch->base); - pch->base.vblank.bo = chan->notifier_bo; + 
pch->base.vblank.channel = chan->ramin->vinst >> 12; chan->engctx[engine] = pch; /* dma objects for display sync channel semaphore blocks */ -- cgit v1.2.3 From 715855457e6bc93e148caf8cb3b5dcabbf605b0d Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Wed, 25 Jul 2012 20:42:05 +0200 Subject: drm/nouveau: init vblank requests list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes kernel panic when vblank interrupt triggers before first sync to vblank request. (Besides init, remove some relevant leftovers from vblank rework) Reported-by: Ortwin Glück Signed-off-by: Marcin Slusarz Cc: stable@vger.kernel.org [3.5] Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 -- drivers/gpu/drm/nouveau/nouveau_irq.c | 4 ---- drivers/gpu/drm/nouveau/nouveau_software.h | 1 + 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 03da38373552..4f2cc95ce264 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -689,8 +689,6 @@ struct drm_nouveau_private { void (*irq_handler[32])(struct drm_device *); bool msi_enabled; - struct list_head vbl_waiting; - struct { struct drm_global_reference mem_global_ref; struct ttm_bo_global_ref bo_global_ref; diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 868c7fd74854..b2c2937531a8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -41,12 +41,8 @@ void nouveau_irq_preinstall(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - /* Master disable */ nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); - - INIT_LIST_HEAD(&dev_priv->vbl_waiting); } int diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h index 9914cf154f65..709e5ac680ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_software.h +++ b/drivers/gpu/drm/nouveau/nouveau_software.h @@ -23,6 +23,7 @@ static inline void nouveau_software_context_new(struct nouveau_software_chan *pch) { INIT_LIST_HEAD(&pch->flip); + INIT_LIST_HEAD(&pch->vblank.list); } static inline void -- cgit v1.2.3
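The fix above is a single INIT_LIST_HEAD(), and the rule behind it is general: a struct list_head embedded in freshly zeroed memory starts with NULL prev/next pointers, so unlinking a node that was never initialised or added writes through NULL and oopses, whereas a node that has been through INIT_LIST_HEAD() points at itself and can be unlinked harmlessly. The exact path that trips over the uninitialised per-channel node is not spelled out in the commit message beyond "vblank interrupt before the first sync-to-vblank request", so the sketch below only illustrates the idiom with hypothetical names; it is not the nouveau code.

#include <linux/list.h>
#include <linux/slab.h>

struct example_swchan {
	struct list_head vblank_node;	/* links into a global vblank wait list */
};

static struct example_swchan *example_swchan_new(void)
{
	struct example_swchan *pch = kzalloc(sizeof(*pch), GFP_KERNEL);

	if (!pch)
		return NULL;
	/* Without this, a later list_del(&pch->vblank_node) on a node that
	 * was never list_add()ed dereferences NULL prev/next and panics;
	 * with it, unlinking a never-queued node is harmless. */
	INIT_LIST_HEAD(&pch->vblank_node);
	return pch;
}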