author	Tom O'Rourke <Tom.O'Rourke@intel.com>	2013-09-17 01:56:43 +0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-10-10 16:23:39 +0400
commit	5c9669cee534cbb834d51aae115267f5e561b622 (patch)
tree	12b901434fc7dc686b5fb645384bb9f754b26a0a /drivers/gpu/drm/i915/i915_sysfs.c
parent	09e14bf3ba4b72be4d57d99e3620beae4fb1ecd8 (diff)
download	linux-5c9669cee534cbb834d51aae115267f5e561b622.tar.xz
drm/i915: Finish enabling rps before use by sysfs or debugfs
Enabling rps (turbo setup) was put in a work queue because it may take quite a while. This change flushes the work queue to initialize rps values before they are used by sysfs or debugfs. Specifically, rps.delayed_resume_work is flushed before using rps.hw_max, rps.max_delay, rps.min_delay, or rps.cur_delay.

This fixes a problem in sysfs where show functions that read uninitialized values report incorrect results, and store functions that use uninitialized values in their range checks incorrectly reject valid input. The change also addresses similar use-before-initialization problems in debugfs.

Signed-off-by: Tom O'Rourke <Tom.O'Rourke@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
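The fix relies on standard workqueue semantics: flush_delayed_work() forces a still-pending timer to fire, then waits for the work function to finish, so anything that function initializes is guaranteed to be visible to the caller afterwards. The fragment below is a minimal, self-contained sketch of that flush-before-read pattern in kernel-module style; the demo_* names are hypothetical stand-ins for the i915 rps state and are not part of the driver.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

/* Hypothetical stand-in for dev_priv->rps: a value that is only valid
 * once the deferred "resume" work has run. */
struct demo_state {
	struct delayed_work resume_work;
	struct mutex lock;
	int max_val;
};

static struct demo_state demo;

static void demo_resume_fn(struct work_struct *work)
{
	struct demo_state *s = container_of(to_delayed_work(work),
					    struct demo_state, resume_work);

	/* Slow, deferred initialization (stands in for rps setup). */
	mutex_lock(&s->lock);
	s->max_val = 42;
	mutex_unlock(&s->lock);
}

/* Reader analogous to a sysfs show/store handler. */
static int demo_read_max(struct demo_state *s)
{
	int val;

	/* Make sure the deferred init has completed before using its results. */
	flush_delayed_work(&s->resume_work);

	mutex_lock(&s->lock);
	val = s->max_val;
	mutex_unlock(&s->lock);

	return val;
}

static int __init demo_init(void)
{
	mutex_init(&demo.lock);
	INIT_DELAYED_WORK(&demo.resume_work, demo_resume_fn);
	schedule_delayed_work(&demo.resume_work, HZ);	/* would run ~1s later */

	/* Reads 42, not 0, because the flush forces the work to run first. */
	pr_info("demo max_val = %d\n", demo_read_max(&demo));
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo.resume_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

In the patch itself the same idea is applied directly: each sysfs show/store handler calls flush_delayed_work(&dev_priv->rps.delayed_resume_work) before taking rps.hw_lock, so rps.hw_max, rps.max_delay, rps.min_delay, and rps.cur_delay are initialized by the time they are read or compared against user input.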
Diffstat (limited to 'drivers/gpu/drm/i915/i915_sysfs.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_sysfs.c	10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 8003886361b8..9ff1e4d96909 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -251,6 +251,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
 		u32 freq;
@@ -283,6 +285,8 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@@ -307,6 +311,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -355,6 +361,8 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@@ -379,6 +387,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev)) {