Diffstat (limited to 'drivers/gpu/drm/i915/display/intel_bw.c')
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c  312
1 file changed, 195 insertions(+), 117 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index adf58c58513b..37bd7b17f3d0 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic_state_helper.h>
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
@@ -124,8 +125,8 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
points_mask,
- ICL_PCODE_POINTS_RESTRICTED_MASK,
- ICL_PCODE_POINTS_RESTRICTED,
+ ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
+ ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
1);
if (ret < 0) {
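
The mask/value pair passed to skl_pcode_request() is a completion test: the request is retried until the masked reply matches the expected value, so OR-ing the QGV and PSF fields together makes one wait cover both acks. A standalone sketch of that test (the field values here are hypothetical, not the real i915_reg.h layout):

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical ack-field layout; the real masks live in i915_reg.h. */
	#define QGV_MASK 0x3u
	#define QGV_SAFE 0x1u
	#define PSF_MASK 0xcu
	#define PSF_SAFE 0x4u

	/* skl_pcode_request()-style completion test: done once every
	 * masked reply field carries its expected value. */
	static int pcode_done(uint32_t reply, uint32_t mask, uint32_t val)
	{
		return (reply & mask) == val;
	}

	int main(void)
	{
		uint32_t mask = QGV_MASK | PSF_MASK;
		uint32_t val = QGV_SAFE | PSF_SAFE;

		printf("%d\n", pcode_done(QGV_SAFE, mask, val));            /* 0: PSF not acked yet */
		printf("%d\n", pcode_done(QGV_SAFE | PSF_SAFE, mask, val)); /* 1: both report safe */
		return 0;
	}
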
@@ -464,20 +465,25 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
static void dg2_get_bw_info(struct drm_i915_private *i915)
{
- struct intel_bw_info *bi = &i915->max_bw[0];
+ unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000;
+ int num_groups = ARRAY_SIZE(i915->max_bw);
+ int i;
/*
* DG2 doesn't have SAGV or QGV points, just a constant max bandwidth
- * that doesn't depend on the number of planes enabled. Create a
- * single dummy QGV point to reflect that. DG2-G10 platforms have a
- * constant 50 GB/s bandwidth, whereas DG2-G11 platforms have 38 GB/s.
+ * that doesn't depend on the number of planes enabled. So fill all the
+ * plane groups with constant bw information for uniformity with other
+ * platforms. DG2-G10 platforms have a constant 50 GB/s bandwidth,
+ * whereas DG2-G11 platforms have 38 GB/s.
*/
- bi->num_planes = 1;
- bi->num_qgv_points = 1;
- if (IS_DG2_G11(i915))
- bi->deratedbw[0] = 38000;
- else
- bi->deratedbw[0] = 50000;
+ for (i = 0; i < num_groups; i++) {
+ struct intel_bw_info *bi = &i915->max_bw[i];
+
+ bi->num_planes = 1;
+ /* Need only one dummy QGV point per group */
+ bi->num_qgv_points = 1;
+ bi->deratedbw[0] = deratedbw;
+ }
i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
}
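
With this change every entry of i915->max_bw[] carries the same single dummy QGV point, so generic lookup code needn't special-case DG2. A compilable sketch of the resulting table shape (struct reduced to just the fields used here; 50000/38000 match the GB/s figures in the comment, expressed in MB/s):

	#include <stdio.h>

	#define NUM_GROUPS 4 /* stand-in for ARRAY_SIZE(i915->max_bw) */

	struct bw_info { /* reduced model of struct intel_bw_info */
		int num_planes;
		int num_qgv_points;
		unsigned int deratedbw[1]; /* MB/s */
	};

	int main(void)
	{
		int is_g11 = 0; /* DG2-G10 in this example */
		unsigned int deratedbw = is_g11 ? 38000 : 50000;
		struct bw_info max_bw[NUM_GROUPS];
		int i;

		/* same fill as dg2_get_bw_info(): constant bw in every group */
		for (i = 0; i < NUM_GROUPS; i++) {
			max_bw[i].num_planes = 1;
			max_bw[i].num_qgv_points = 1;
			max_bw[i].deratedbw[0] = deratedbw;
		}

		printf("%u MB/s in each of %d groups\n",
		       max_bw[0].deratedbw[0], NUM_GROUPS);
		return 0;
	}
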
@@ -578,6 +584,7 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat
static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
unsigned int data_rate = 0;
enum plane_id plane_id;
@@ -590,11 +597,26 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
continue;
data_rate += crtc_state->data_rate[plane_id];
+
+ if (DISPLAY_VER(i915) < 11)
+ data_rate += crtc_state->data_rate_y[plane_id];
}
return data_rate;
}
+/* "Maximum Pipe Read Bandwidth" */
+static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(i915) < 12)
+ return 0;
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512);
+}
+
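
The new helper encodes the Bspec "Maximum Pipe Read Bandwidth" limit: a pipe can read at most 512/10 = 51.2 bytes per cdclk cycle, so cdclk must be at least data_rate * 10 / 512, rounded up. A worked example (assuming data rates in kB/s and cdclk in kHz, as elsewhere in i915):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* illustrative rate: 3840x2160@60 at 4 bytes/px
		 * = 3840 * 2160 * 60 * 4 B/s = 1990656 kB/s */
		unsigned long long data_rate = 1990656ULL;

		/* cdclk >= data_rate / 51.2 */
		unsigned long long min_cdclk = DIV_ROUND_UP(data_rate * 10, 512);

		printf("min cdclk = %llu kHz\n", min_cdclk); /* 38880 */
		return 0;
	}
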
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
@@ -633,8 +655,8 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe)
data_rate += bw_state->data_rate[pipe];
- if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active(dev_priv))
- data_rate = data_rate * 105 / 100;
+ if (DISPLAY_VER(dev_priv) >= 13 && i915_vtd_active(dev_priv))
+ data_rate = DIV_ROUND_UP(data_rate * 105, 100);
return data_rate;
}
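
Two fixes in one hunk: intel_vtd_active() is renamed to i915_vtd_active(), supplied by the new i915_utils.h include at the top of the file, and the 5% VT-d overhead now rounds up instead of truncating, so the estimate can never under-report. The rounding difference, standalone:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int data_rate = 999; /* illustrative, any unit */

		unsigned int old = data_rate * 105 / 100;                /* 1048, truncated */
		unsigned int fixed = DIV_ROUND_UP(data_rate * 105, 100); /* 1049, rounded up */

		printf("%u vs %u\n", old, fixed);
		return 0;
	}
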
@@ -674,6 +696,53 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
return to_intel_bw_state(bw_state);
}
+static bool intel_bw_state_changed(struct drm_i915_private *i915,
+ const struct intel_bw_state *old_bw_state,
+ const struct intel_bw_state *new_bw_state)
+{
+ enum pipe pipe;
+
+ for_each_pipe(i915, pipe) {
+ const struct intel_dbuf_bw *old_crtc_bw =
+ &old_bw_state->dbuf_bw[pipe];
+ const struct intel_dbuf_bw *new_crtc_bw =
+ &new_bw_state->dbuf_bw[pipe];
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(i915, slice) {
+ if (old_crtc_bw->max_bw[slice] != new_crtc_bw->max_bw[slice] ||
+ old_crtc_bw->active_planes[slice] != new_crtc_bw->active_planes[slice])
+ return true;
+ }
+
+ if (old_bw_state->min_cdclk[pipe] != new_bw_state->min_cdclk[pipe])
+ return true;
+ }
+
+ return false;
+}
+
+static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state,
+ struct intel_crtc *crtc,
+ enum plane_id plane_id,
+ const struct skl_ddb_entry *ddb,
+ unsigned int data_rate)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
+ unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(i915, ddb);
+ enum dbuf_slice slice;
+
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) {
+ crtc_bw->max_bw[slice] = max(crtc_bw->max_bw[slice], data_rate);
+ crtc_bw->active_planes[slice] |= BIT(plane_id);
+ }
+}
+
static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
@@ -682,136 +751,145 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
enum plane_id plane_id;
- memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));
+ memset(crtc_bw, 0, sizeof(*crtc_bw));
if (!crtc_state->hw.active)
return;
for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- const struct skl_ddb_entry *ddb_uv =
- &crtc_state->wm.skl.plane_ddb_uv[plane_id];
- unsigned int data_rate = crtc_state->data_rate[plane_id];
- unsigned int dbuf_mask = 0;
- enum dbuf_slice slice;
-
- dbuf_mask |= skl_ddb_dbuf_slice_mask(i915, ddb_y);
- dbuf_mask |= skl_ddb_dbuf_slice_mask(i915, ddb_uv);
-
/*
- * FIXME: To calculate that more properly we probably
- * need to split per plane data_rate into data_rate_y
- * and data_rate_uv for multiplanar formats in order not
- * to get accounted those twice if they happen to reside
- * on different slices.
- * However for pre-icl this would work anyway because
- * we have only single slice and for icl+ uv plane has
- * non-zero data rate.
- * So in worst case those calculation are a bit
- * pessimistic, which shouldn't pose any significant
- * problem anyway.
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
*/
- for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask)
- crtc_bw->used_bw[slice] += data_rate;
- }
-}
-
-int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_bw_state *new_bw_state = NULL;
- struct intel_bw_state *old_bw_state = NULL;
- const struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
- int max_bw = 0;
- enum pipe pipe;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- new_bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(new_bw_state))
- return PTR_ERR(new_bw_state);
+ if (plane_id == PLANE_CURSOR)
+ continue;
- old_bw_state = intel_atomic_get_old_bw_state(state);
+ skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb[plane_id],
+ crtc_state->data_rate[plane_id]);
- skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);
+ if (DISPLAY_VER(i915) < 11)
+ skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb_y[plane_id],
+ crtc_state->data_rate[plane_id]);
}
+}
- if (!old_bw_state)
- return 0;
+/* "Maximum Data Buffer Bandwidth" */
+static int
+intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state)
+{
+ unsigned int total_max_bw = 0;
+ enum dbuf_slice slice;
- for_each_pipe(dev_priv, pipe) {
- struct intel_dbuf_bw *crtc_bw;
- enum dbuf_slice slice;
+ for_each_dbuf_slice(i915, slice) {
+ int num_active_planes = 0;
+ unsigned int max_bw = 0;
+ enum pipe pipe;
- crtc_bw = &new_bw_state->dbuf_bw[pipe];
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_pipe(i915, pipe) {
+ const struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[pipe];
- for_each_dbuf_slice(dev_priv, slice) {
- /*
- * Current experimental observations show that contrary
- * to BSpec we get underruns once we exceed 64 * CDCLK
- * for slices in total.
- * As a temporary measure in order not to keep CDCLK
- * bumped up all the time we calculate CDCLK according
- * to this formula for overall bw consumed by slices.
- */
- max_bw += crtc_bw->used_bw[slice];
+ max_bw = max(crtc_bw->max_bw[slice], max_bw);
+ num_active_planes += hweight8(crtc_bw->active_planes[slice]);
}
+ max_bw *= num_active_planes;
+
+ total_max_bw = max(total_max_bw, max_bw);
}
- new_bw_state->min_cdclk = max_bw / 64;
+ return DIV_ROUND_UP(total_max_bw, 64);
+}
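
intel_bw_dbuf_min_cdclk() sizes each slice for its hungriest plane times the number of planes on it, since the arbiter only guarantees an equal share, then applies the empirically observed limit of 64 bytes per cdclk cycle noted in the removed comment above. A worked example with illustrative numbers:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* two planes on one dbuf slice, illustrative rates in kB/s */
		unsigned int rates[2] = { 300000, 500000 };
		unsigned int max_bw = 0, num_planes = 2;
		unsigned int total_max_bw, i;

		for (i = 0; i < num_planes; i++)
			if (rates[i] > max_bw)
				max_bw = rates[i];

		/* equal-share arbiter: size the slice as if every plane
		 * needed as much as the hungriest one */
		total_max_bw = max_bw * num_planes; /* 1000000 */

		/* underruns were observed above 64 bytes per cdclk cycle */
		printf("min cdclk = %u kHz\n",
		       DIV_ROUND_UP(total_max_bw, 64)); /* 15625 */
		return 0;
	}
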
- if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
- int ret = intel_atomic_lock_global_state(&new_bw_state->base);
+int intel_bw_min_cdclk(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state)
+{
+ enum pipe pipe;
+ int min_cdclk;
- if (ret)
- return ret;
- }
+ min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
- return 0;
+ for_each_pipe(i915, pipe)
+ min_cdclk = max(bw_state->min_cdclk[pipe], min_cdclk);
+
+ return min_cdclk;
}
-int intel_bw_calc_min_cdclk(struct intel_atomic_state *state)
+int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_bw_state *new_bw_state = NULL;
- struct intel_bw_state *old_bw_state = NULL;
+ const struct intel_bw_state *old_bw_state = NULL;
+ const struct intel_cdclk_state *cdclk_state;
const struct intel_crtc_state *crtc_state;
+ int old_min_cdclk, new_min_cdclk;
struct intel_crtc *crtc;
- int min_cdclk = 0;
- enum pipe pipe;
int i;
+ if (DISPLAY_VER(dev_priv) < 9)
+ return 0;
+
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);
+
+ new_bw_state->min_cdclk[crtc->pipe] =
+ intel_bw_crtc_min_cdclk(crtc_state);
}
if (!old_bw_state)
return 0;
- for_each_pipe(dev_priv, pipe) {
- struct intel_cdclk_state *cdclk_state;
+ if (intel_bw_state_changed(dev_priv, old_bw_state, new_bw_state)) {
+ int ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
- cdclk_state = intel_atomic_get_new_cdclk_state(state);
- if (!cdclk_state)
- return 0;
+ old_min_cdclk = intel_bw_min_cdclk(dev_priv, old_bw_state);
+ new_min_cdclk = intel_bw_min_cdclk(dev_priv, new_bw_state);
- min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
- }
+ /*
+ * No need to check against the cdclk state if
+ * the min cdclk doesn't increase.
+ *
+ * Ie. we only ever increase the cdclk due to bandwidth
+ * requirements. This can reduce back and forth
+ * display blinking due to constant cdclk changes.
+ */
+ if (new_min_cdclk <= old_min_cdclk)
+ return 0;
- new_bw_state->min_cdclk = min_cdclk;
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
- if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
- int ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ /*
+ * No need to recalculate the cdclk state if
+ * the min cdclk doesn't increase.
+ *
+ * Ie. we only ever increase the cdclk due to bandwidth
+ * requirements. This can reduce back and forth
+ * display blinking due to constant cdclk changes.
+ */
+ if (new_min_cdclk <= cdclk_state->bw_min_cdclk)
+ return 0;
- if (ret)
- return ret;
- }
+ drm_dbg_kms(&dev_priv->drm,
+ "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
+ new_min_cdclk, cdclk_state->bw_min_cdclk);
+ *need_cdclk_calc = true;
return 0;
}
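
The rewritten intel_bw_calc_min_cdclk() only flags a cdclk recomputation when the bandwidth-driven minimum actually grows past what the current cdclk state already provides; cdclk is never lowered here, which avoids display blinking from constant reprogramming. A toy model of the two early returns (values illustrative, in kHz):

	#include <stdbool.h>
	#include <stdio.h>

	/* mirrors the two "no need to recalculate" checks above */
	static bool need_cdclk_calc(int old_min, int new_min, int bw_min_cdclk)
	{
		if (new_min <= old_min)      /* bw requirement did not grow */
			return false;
		if (new_min <= bw_min_cdclk) /* current cdclk already covers it */
			return false;
		return true;
	}

	int main(void)
	{
		printf("%d\n", need_cdclk_calc(300000, 280000, 350000)); /* 0 */
		printf("%d\n", need_cdclk_calc(300000, 320000, 350000)); /* 0 */
		printf("%d\n", need_cdclk_calc(300000, 360000, 350000)); /* 1 */
		return 0;
	}
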
@@ -820,7 +898,7 @@ static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
{
unsigned int num_psf_gv_points = i915->max_bw[0].num_psf_gv_points;
unsigned int num_qgv_points = i915->max_bw[0].num_qgv_points;
- u16 mask = 0;
+ u16 qgv_points = 0, psf_points = 0;
/*
* We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
@@ -828,12 +906,12 @@ static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
* So need to operate only with those returned from PCode.
*/
if (num_qgv_points > 0)
- mask |= REG_GENMASK(num_qgv_points - 1, 0);
+ qgv_points = GENMASK(num_qgv_points - 1, 0);
if (num_psf_gv_points > 0)
- mask |= REG_GENMASK(num_psf_gv_points - 1, 0) << ADLS_PSF_PT_SHIFT;
+ psf_points = GENMASK(num_psf_gv_points - 1, 0);
- return mask;
+ return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
}
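
icl_qgv_points_mask() now builds the QGV and PSF point sets separately and lets the REQ macros place them, rather than hand-shifting bits into one register mask. A standalone illustration (the field placement here is hypothetical; the real macros are in i915_reg.h):

	#include <stdio.h>

	#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

	/* hypothetical placements; only the "build separately, then
	 * combine" shape matters */
	#define REQ_QGV_PT(x) ((x) & 0xff)
	#define REQ_PSF_PT(x) (((x) & 0x7) << 8)

	int main(void)
	{
		unsigned int num_qgv_points = 3, num_psf_gv_points = 2;
		unsigned int qgv = GENMASK(num_qgv_points - 1, 0);    /* 0x7 */
		unsigned int psf = GENMASK(num_psf_gv_points - 1, 0); /* 0x3 */

		/* covers only the points PCode actually reported */
		printf("0x%x\n", REQ_QGV_PT(qgv) | REQ_PSF_PT(psf)); /* 0x307 */
		return 0;
	}
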
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
@@ -890,7 +968,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
unsigned int data_rate;
unsigned int num_active_planes;
int i, ret;
- u32 allowed_points = 0;
+ u16 qgv_points = 0, psf_points = 0;
unsigned int max_bw_point = 0, max_bw = 0;
unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points;
@@ -948,7 +1026,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
max_bw = max_data_rate;
}
if (max_data_rate >= data_rate)
- allowed_points |= REG_FIELD_PREP(ADLS_QGV_PT_MASK, BIT(i));
+ qgv_points |= BIT(i);
drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
i, max_data_rate, data_rate);
@@ -958,7 +1036,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
unsigned int max_data_rate = adl_psf_bw(dev_priv, i);
if (max_data_rate >= data_rate)
- allowed_points |= REG_FIELD_PREP(ADLS_PSF_PT_MASK, BIT(i));
+ psf_points |= BIT(i);
drm_dbg_kms(&dev_priv->drm, "PSF GV point %d: max bw %d"
" required %d\n",
@@ -970,20 +1048,18 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
* left, so if we couldn't - simply reject the configuration for obvious
* reasons.
*/
- if ((allowed_points & ADLS_QGV_PT_MASK) == 0) {
+ if (qgv_points == 0) {
drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
- if (num_psf_gv_points > 0) {
- if ((allowed_points & ADLS_PSF_PT_MASK) == 0) {
- drm_dbg_kms(&dev_priv->drm, "No PSF GV points provide sufficient memory"
- " bandwidth %d for display configuration(%d active planes).\n",
- data_rate, num_active_planes);
- return -EINVAL;
- }
+ if (num_psf_gv_points > 0 && psf_points == 0) {
+ drm_dbg_kms(&dev_priv->drm, "No PSF GV points provide sufficient memory"
+ " bandwidth %d for display configuration(%d active planes).\n",
+ data_rate, num_active_planes);
+ return -EINVAL;
}
/*
@@ -992,16 +1068,18 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
* cause.
*/
if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
- allowed_points &= ADLS_PSF_PT_MASK;
- allowed_points |= BIT(max_bw_point);
+ qgv_points = BIT(max_bw_point);
drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
max_bw_point);
}
+
/*
* We store the ones which need to be masked as that is what PCode
* actually accepts as a parameter.
*/
- new_bw_state->qgv_points_mask = ~allowed_points &
+ new_bw_state->qgv_points_mask =
+ ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) &
icl_qgv_points_mask(dev_priv);
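
As the preceding comment says, PCode takes the points to restrict rather than the points to allow, so the allowed sets are inverted and clipped to the points PCode actually reported. In miniature:

	#include <stdio.h>

	int main(void)
	{
		/* 3 QGV points reported; points 0 and 1 have enough bandwidth */
		unsigned int reported = 0x7, allowed = 0x3;

		/* PCode takes the points to restrict, i.e. the complement */
		unsigned int restricted = ~allowed & reported;

		printf("0x%x\n", restricted); /* 0x4: only point 2 masked off */
		return 0;
	}
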
/*