Diffstat (limited to 'drivers/gpu/drm/i915/i915_debugfs.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  342
1 file changed, 177 insertions(+), 165 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 82bbe3f2a7e1..e3ec9049081f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,6 +117,20 @@ static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
}
+static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
+{
+ u64 size = 0;
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (i915_is_ggtt(vma->vm) &&
+ drm_mm_node_allocated(&vma->node))
+ size += vma->node.size;
+ }
+
+ return size;
+}
+
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
@@ -156,13 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
list_for_each_entry(vma, &obj->vma_list, vma_link) {
- if (!i915_is_ggtt(vma->vm))
- seq_puts(m, " (pp");
+ seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
+ i915_is_ggtt(vma->vm) ? "g" : "pp",
+ vma->node.start, vma->node.size);
+ if (i915_is_ggtt(vma->vm))
+ seq_printf(m, ", type: %u)", vma->ggtt_view.type);
else
- seq_puts(m, " (g");
- seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
- vma->node.start, vma->node.size,
- vma->ggtt_view.type);
+ seq_puts(m, ")");
}
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
@@ -198,7 +212,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct i915_vma *vma;
- size_t total_obj_size, total_gtt_size;
+ u64 total_obj_size, total_gtt_size;
int count, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -231,7 +245,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}
mutex_unlock(&dev->struct_mutex);
- seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
}
@@ -253,7 +267,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- size_t total_obj_size, total_gtt_size;
+ u64 total_obj_size, total_gtt_size;
LIST_HEAD(stolen);
int count, ret;
@@ -269,7 +283,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
list_add(&obj->obj_exec_link, &stolen);
total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_ggtt_size(obj);
+ total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
}
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
@@ -292,14 +306,14 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
}
mutex_unlock(&dev->struct_mutex);
- seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
}
#define count_objects(list, member) do { \
list_for_each_entry(obj, list, member) { \
- size += i915_gem_obj_ggtt_size(obj); \
+ size += i915_gem_obj_total_ggtt_size(obj); \
++count; \
if (obj->map_and_fenceable) { \
mappable_size += i915_gem_obj_ggtt_size(obj); \
@@ -310,10 +324,10 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
struct file_stats {
struct drm_i915_file_private *file_priv;
- int count;
- size_t total, unbound;
- size_t global, shared;
- size_t active, inactive;
+ unsigned long count;
+ u64 total, unbound;
+ u64 global, shared;
+ u64 active, inactive;
};
static int per_file_stats(int id, void *ptr, void *data)
@@ -370,7 +384,7 @@ static int per_file_stats(int id, void *ptr, void *data)
#define print_file_stats(m, name, stats) do { \
if (stats.count) \
- seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
+ seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
name, \
stats.count, \
stats.total, \
@@ -405,7 +419,7 @@ static void print_batch_pool_stats(struct seq_file *m,
#define count_vmas(list, member) do { \
list_for_each_entry(vma, list, member) { \
- size += i915_gem_obj_ggtt_size(vma->obj); \
+ size += i915_gem_obj_total_ggtt_size(vma->obj); \
++count; \
if (vma->obj->map_and_fenceable) { \
mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
@@ -420,7 +434,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 count, mappable_count, purgeable_count;
- size_t size, mappable_size, purgeable_size;
+ u64 size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_file *file;
@@ -437,17 +451,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.bound_list, global_list);
- seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+ seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_vmas(&vm->active_list, mm_list);
- seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
+ seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_vmas(&vm->inactive_list, mm_list);
- seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
+ seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = purgeable_size = purgeable_count = 0;
@@ -456,7 +470,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
if (obj->madv == I915_MADV_DONTNEED)
purgeable_size += obj->base.size, ++purgeable_count;
}
- seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+ seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -473,16 +487,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
++purgeable_count;
}
}
- seq_printf(m, "%u purgeable objects, %zu bytes\n",
+ seq_printf(m, "%u purgeable objects, %llu bytes\n",
purgeable_count, purgeable_size);
- seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+ seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
mappable_count, mappable_size);
- seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+ seq_printf(m, "%u fault mappable objects, %llu bytes\n",
count, size);
- seq_printf(m, "%zu [%lu] gtt total\n",
+ seq_printf(m, "%llu [%llu] gtt total\n",
dev_priv->gtt.base.total,
- dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+ (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
@@ -519,7 +533,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
uintptr_t list = (uintptr_t) node->info_ent->data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- size_t total_obj_size, total_gtt_size;
+ u64 total_obj_size, total_gtt_size;
int count, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -535,13 +549,13 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
describe_obj(m, obj);
seq_putc(m, '\n');
total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_ggtt_size(obj);
+ total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
}
mutex_unlock(&dev->struct_mutex);
- seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
count, total_obj_size, total_gtt_size);
return 0;
@@ -1132,9 +1146,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
IS_BROADWELL(dev) || IS_GEN9(dev)) {
- u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
- u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 rp_state_limits;
+ u32 gt_perf_status;
+ u32 rp_state_cap;
u32 rpmodectl, rpinclimit, rpdeclimit;
u32 rpstat, cagf, reqf;
u32 rpupei, rpcurup, rpprevup;
@@ -1142,6 +1156,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
int max_freq;
+ rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+ if (IS_BROXTON(dev)) {
+ rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+ gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
+ } else {
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ }
+
/* RPSTAT1 is in the GT power well */
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -1229,7 +1252,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Down threshold: %d%%\n",
dev_priv->rps.down_threshold);
- max_freq = (rp_state_cap & 0xff0000) >> 16;
+ max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+ rp_state_cap >> 16) & 0xff;
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
@@ -1239,7 +1263,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
- max_freq = rp_state_cap & 0xff;
+ max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+ rp_state_cap >> 0) & 0xff;
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
@@ -1581,6 +1606,21 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
return ironlake_drpc_info(m);
}
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+ dev_priv->fb_tracking.busy_bits);
+
+ seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+ dev_priv->fb_tracking.flip_bits);
+
+ return 0;
+}
+
static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1593,51 +1633,20 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
}
intel_runtime_pm_get(dev_priv);
+ mutex_lock(&dev_priv->fbc.lock);
- if (intel_fbc_enabled(dev)) {
+ if (intel_fbc_enabled(dev_priv))
seq_puts(m, "FBC enabled\n");
- } else {
- seq_puts(m, "FBC disabled: ");
- switch (dev_priv->fbc.no_fbc_reason) {
- case FBC_OK:
- seq_puts(m, "FBC actived, but currently disabled in hardware");
- break;
- case FBC_UNSUPPORTED:
- seq_puts(m, "unsupported by this chipset");
- break;
- case FBC_NO_OUTPUT:
- seq_puts(m, "no outputs");
- break;
- case FBC_STOLEN_TOO_SMALL:
- seq_puts(m, "not enough stolen memory");
- break;
- case FBC_UNSUPPORTED_MODE:
- seq_puts(m, "mode not supported");
- break;
- case FBC_MODE_TOO_LARGE:
- seq_puts(m, "mode too large");
- break;
- case FBC_BAD_PLANE:
- seq_puts(m, "FBC unsupported on plane");
- break;
- case FBC_NOT_TILED:
- seq_puts(m, "scanout buffer not tiled");
- break;
- case FBC_MULTIPLE_PIPES:
- seq_puts(m, "multiple pipes are enabled");
- break;
- case FBC_MODULE_PARAM:
- seq_puts(m, "disabled per module param (default off)");
- break;
- case FBC_CHIP_DEFAULT:
- seq_puts(m, "disabled per chip default");
- break;
- default:
- seq_puts(m, "unknown reason");
- }
- seq_putc(m, '\n');
- }
+ else
+ seq_printf(m, "FBC disabled: %s\n",
+ intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason));
+
+ if (INTEL_INFO(dev_priv)->gen >= 7)
+ seq_printf(m, "Compressing: %s\n",
+ yesno(I915_READ(FBC_STATUS2) &
+ FBC_COMPRESSION_MASK));
+ mutex_unlock(&dev_priv->fbc.lock);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -1651,9 +1660,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
return -ENODEV;
- drm_modeset_lock_all(dev);
*val = dev_priv->fbc.false_color;
- drm_modeset_unlock_all(dev);
return 0;
}
@@ -1667,7 +1674,7 @@ static int i915_fbc_fc_set(void *data, u64 val)
if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
return -ENODEV;
- drm_modeset_lock_all(dev);
+ mutex_lock(&dev_priv->fbc.lock);
reg = I915_READ(ILK_DPFC_CONTROL);
dev_priv->fbc.false_color = val;
@@ -1676,7 +1683,7 @@ static int i915_fbc_fc_set(void *data, u64 val)
(reg | FBC_CTL_FALSE_COLOR) :
(reg & ~FBC_CTL_FALSE_COLOR));
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&dev_priv->fbc.lock);
return 0;
}
@@ -1778,8 +1785,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
int gpu_freq, ia_freq;
+ unsigned int max_gpu_freq, min_gpu_freq;
- if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+ if (!HAS_CORE_RING_FREQ(dev)) {
seq_puts(m, "unsupported on this chipset\n");
return 0;
}
@@ -1792,17 +1800,27 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret)
goto out;
+ if (IS_SKYLAKE(dev)) {
+ /* Convert GT frequency to 50 HZ units */
+ min_gpu_freq =
+ dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
+ max_gpu_freq =
+ dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
+ } else {
+ min_gpu_freq = dev_priv->rps.min_freq_softlimit;
+ max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+ }
+
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
- for (gpu_freq = dev_priv->rps.min_freq_softlimit;
- gpu_freq <= dev_priv->rps.max_freq_softlimit;
- gpu_freq++) {
+ for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
ia_freq = gpu_freq;
sandybridge_pcode_read(dev_priv,
GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- intel_gpu_freq(dev_priv, gpu_freq),
+ intel_gpu_freq(dev_priv, (gpu_freq *
+ (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
@@ -1848,8 +1866,9 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct intel_fbdev *ifbdev = NULL;
struct intel_framebuffer *fb;
+ struct drm_framebuffer *drm_fb;
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = dev->dev_private;
ifbdev = dev_priv->fbdev;
@@ -1867,7 +1886,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
#endif
mutex_lock(&dev->mode_config.fb_lock);
- list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+ drm_for_each_fb(drm_fb, dev) {
+ fb = to_intel_framebuffer(drm_fb);
if (ifbdev && &fb->base == ifbdev->helper.fb)
continue;
@@ -2248,7 +2268,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
seq_puts(m, "aliasing PPGTT:\n");
- seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
+ seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
ppgtt->debug_dump(ppgtt, m);
}
@@ -2479,13 +2499,13 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
return 0;
}
-static int i915_pc8_status(struct seq_file *m, void *unused)
+static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+ if (!HAS_RUNTIME_PM(dev)) {
seq_puts(m, "not supported\n");
return 0;
}
@@ -2493,6 +2513,12 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
+#ifdef CONFIG_PM
+ seq_printf(m, "Usage count: %d\n",
+ atomic_read(&dev->dev->power.usage_count));
+#else
+ seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
+#endif
return 0;
}
@@ -2536,6 +2562,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "PORT_DDI_D_2_LANES";
case POWER_DOMAIN_PORT_DDI_D_4_LANES:
return "PORT_DDI_D_4_LANES";
+ case POWER_DOMAIN_PORT_DDI_E_2_LANES:
+ return "PORT_DDI_E_2_LANES";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@@ -2780,13 +2808,16 @@ static int i915_display_info(struct seq_file *m, void *unused)
seq_printf(m, "---------\n");
for_each_intel_crtc(dev, crtc) {
bool active;
+ struct intel_crtc_state *pipe_config;
int x, y;
+ pipe_config = to_intel_crtc_state(crtc->base.state);
+
seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
crtc->base.base.id, pipe_name(crtc->pipe),
- yesno(crtc->active), crtc->config->pipe_src_w,
- crtc->config->pipe_src_h);
- if (crtc->active) {
+ yesno(pipe_config->base.active),
+ pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+ if (pipe_config->base.active) {
intel_crtc_info(m, crtc);
active = cursor_position(dev, crtc->pipe, &x, &y);
@@ -3027,7 +3058,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
seq_puts(m, "\n\n");
- if (intel_crtc->config->has_drrs) {
+ if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
struct intel_panel *panel;
mutex_lock(&drrs->mutex);
@@ -3079,7 +3110,7 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
for_each_intel_crtc(dev, intel_crtc) {
drm_modeset_lock(&intel_crtc->base.mutex, NULL);
- if (intel_crtc->active) {
+ if (intel_crtc->base.state->active) {
active_crtc_cnt++;
seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
@@ -3616,53 +3647,40 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+ struct intel_crtc_state *pipe_config;
+ struct drm_atomic_state *state;
+ int ret = 0;
drm_modeset_lock_all(dev);
- /*
- * If we use the eDP transcoder we need to make sure that we don't
- * bypass the pfit, since otherwise the pipe CRC source won't work. Only
- * relevant on hsw with pipe A when using the always-on power well
- * routing.
- */
- if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
- !crtc->config->pch_pfit.enabled) {
- crtc->config->pch_pfit.force_thru = true;
-
- intel_display_power_get(dev_priv,
- POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
-
- intel_crtc_reset(crtc);
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out;
}
- drm_modeset_unlock_all(dev);
-}
-
-static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
- drm_modeset_lock_all(dev);
- /*
- * If we use the eDP transcoder we need to make sure that we don't
- * bypass the pfit, since otherwise the pipe CRC source won't work. Only
- * relevant on hsw with pipe A when using the always-on power well
- * routing.
- */
- if (crtc->config->pch_pfit.force_thru) {
- crtc->config->pch_pfit.force_thru = false;
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
+ pipe_config = intel_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ goto out;
+ }
- intel_crtc_reset(crtc);
+ pipe_config->pch_pfit.force_thru = enable;
+ if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
+ pipe_config->pch_pfit.enabled != enable)
+ pipe_config->base.connectors_changed = true;
- intel_display_power_put(dev_priv,
- POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
- }
+ ret = drm_atomic_commit(state);
+out:
drm_modeset_unlock_all(dev);
+ WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+ if (ret)
+ drm_atomic_state_free(state);
}
static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
@@ -3682,7 +3700,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
break;
case INTEL_PIPE_CRC_SOURCE_PF:
if (IS_HASWELL(dev) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev);
+ hsw_trans_edp_pipe_A_crc_wa(dev, true);
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
@@ -3776,7 +3794,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
pipe_name(pipe));
drm_modeset_lock(&crtc->base.mutex, NULL);
- if (crtc->active)
+ if (crtc->base.state->active)
intel_wait_for_vblank(dev, pipe);
drm_modeset_unlock(&crtc->base.mutex);
@@ -3794,7 +3812,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
else if (IS_VALLEYVIEW(dev))
vlv_undo_pipe_scramble_reset(dev, pipe);
else if (IS_HASWELL(dev) && pipe == PIPE_A)
- hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+ hsw_trans_edp_pipe_A_crc_wa(dev, false);
hsw_enable_ips(crtc);
}
@@ -3980,24 +3998,14 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
{
char *input_buffer;
int status = 0;
- struct seq_file *m;
struct drm_device *dev;
struct drm_connector *connector;
struct list_head *connector_list;
struct intel_dp *intel_dp;
int val = 0;
- m = file->private_data;
- if (!m) {
- status = -ENODEV;
- return status;
- }
- dev = m->private;
+ dev = ((struct seq_file *)file->private_data)->private;
- if (!dev) {
- status = -ENODEV;
- return status;
- }
connector_list = &dev->mode_config.connector_list;
if (len == 0)
@@ -4021,9 +4029,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->connector_type ==
- DRM_MODE_CONNECTOR_DisplayPort &&
- connector->status == connector_status_connected &&
+ if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
status = kstrtoint(input_buffer, 10, &val);
@@ -4055,9 +4061,6 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
struct list_head *connector_list = &dev->mode_config.connector_list;
struct intel_dp *intel_dp;
- if (!dev)
- return -ENODEV;
-
list_for_each_entry(connector, connector_list, head) {
if (connector->connector_type !=
@@ -4102,9 +4105,6 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
struct list_head *connector_list = &dev->mode_config.connector_list;
struct intel_dp *intel_dp;
- if (!dev)
- return -ENODEV;
-
list_for_each_entry(connector, connector_list, head) {
if (connector->connector_type !=
@@ -4144,9 +4144,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
struct list_head *connector_list = &dev->mode_config.connector_list;
struct intel_dp *intel_dp;
- if (!dev)
- return -ENODEV;
-
list_for_each_entry(connector, connector_list, head) {
if (connector->connector_type !=
@@ -4183,8 +4180,15 @@ static const struct file_operations i915_displayport_test_type_fops = {
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
struct drm_device *dev = m->private;
- int num_levels = ilk_wm_max_level(dev) + 1;
int level;
+ int num_levels;
+
+ if (IS_CHERRYVIEW(dev))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev))
+ num_levels = 1;
+ else
+ num_levels = ilk_wm_max_level(dev) + 1;
drm_modeset_lock_all(dev);
@@ -4193,9 +4197,9 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
/*
* - WM1+ latency values in 0.5us units
- * - latencies are in us on gen9
+ * - latencies are in us on gen9/vlv/chv
*/
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev))
latency *= 10;
else if (level > 0)
latency *= 5;
@@ -4259,7 +4263,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (HAS_GMCH_DISPLAY(dev))
+ if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
return single_open(file, pri_wm_latency_show, dev);
@@ -4291,11 +4295,18 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
uint16_t new[8] = { 0 };
- int num_levels = ilk_wm_max_level(dev) + 1;
+ int num_levels;
int level;
int ret;
char tmp[32];
+ if (IS_CHERRYVIEW(dev))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev))
+ num_levels = 1;
+ else
+ num_levels = ilk_wm_max_level(dev) + 1;
+
if (len >= sizeof(tmp))
return -EINVAL;
@@ -5027,6 +5038,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
+ {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_ips_status", i915_ips_status, 0},
{"i915_sr_status", i915_sr_status, 0},
@@ -5042,7 +5054,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_sink_crc_eDP1", i915_sink_crc, 0},
{"i915_energy_uJ", i915_energy_uJ, 0},
- {"i915_pc8_status", i915_pc8_status, 0},
+ {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
{"i915_semaphore_status", i915_semaphore_status, 0},