Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 1
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 77
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 4
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 8
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 134
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 40
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 254
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_nvif.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/driver.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/object.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/dce3_1_afmt.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_dma.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 207
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 172
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rs780_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dma.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 28
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_reg.h | 1
-rw-r--r--  drivers/gpu/ipu-v3/Kconfig | 3
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 4
-rw-r--r--  drivers/gpu/ipu-v3/ipu-smfc.c | 1
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 6
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 47
117 files changed, 1172 insertions, 1122 deletions
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 19ada0bbe319..9dc0fd5c1ea4 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1104,8 +1104,8 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
- data32.b[2] = srcdata32[0].b[1] | (srcdata32[1].b[0] >> 4);
- data32.b[3] = srcdata32[0].b[3] | (srcdata32[1].b[2] >> 4);
+ data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
+ data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
writel(data32.ul, dstxor);
csum += data32.ul;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 9d7346b92653..6b7efcf363d6 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -250,6 +250,7 @@ static void bochs_connector_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_VIRTUAL);
drm_connector_helper_add(connector,
&bochs_connector_connector_helper_funcs);
+ drm_connector_register(connector);
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index e1c5c3222129..c7c5a9d91fa0 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -555,6 +555,7 @@ static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
+ drm_connector_register(connector);
return connector;
}
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index b3adf1445020..070f913d2dba 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -682,7 +682,7 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_vcpi *vcpi)
{
- int ret;
+ int ret, vcpi_ret;
mutex_lock(&mgr->payload_lock);
ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
@@ -692,8 +692,16 @@ static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
goto out_unlock;
}
+ vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
+ if (vcpi_ret > mgr->max_payloads) {
+ ret = -EINVAL;
+ DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
+ goto out_unlock;
+ }
+
set_bit(ret, &mgr->payload_mask);
- vcpi->vcpi = ret;
+ set_bit(vcpi_ret, &mgr->vcpi_mask);
+ vcpi->vcpi = vcpi_ret + 1;
mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
mutex_unlock(&mgr->payload_lock);
@@ -701,15 +709,23 @@ out_unlock:
}
static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
- int id)
+ int vcpi)
{
- if (id == 0)
+ int i;
+ if (vcpi == 0)
return;
mutex_lock(&mgr->payload_lock);
- DRM_DEBUG_KMS("putting payload %d\n", id);
- clear_bit(id, &mgr->payload_mask);
- mgr->proposed_vcpis[id - 1] = NULL;
+ DRM_DEBUG_KMS("putting payload %d\n", vcpi);
+ clear_bit(vcpi - 1, &mgr->vcpi_mask);
+
+ for (i = 0; i < mgr->max_payloads; i++) {
+ if (mgr->proposed_vcpis[i])
+ if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
+ mgr->proposed_vcpis[i] = NULL;
+ clear_bit(i + 1, &mgr->payload_mask);
+ }
+ }
mutex_unlock(&mgr->payload_lock);
}
@@ -1563,7 +1579,7 @@ static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
}
drm_dp_dpcd_write_payload(mgr, id, payload);
- payload->payload_state = 0;
+ payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
return 0;
}
@@ -1590,7 +1606,7 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
*/
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
- int i;
+ int i, j;
int cur_slots = 1;
struct drm_dp_payload req_payload;
struct drm_dp_mst_port *port;
@@ -1607,26 +1623,46 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
port = NULL;
req_payload.num_slots = 0;
}
+
+ if (mgr->payloads[i].start_slot != req_payload.start_slot) {
+ mgr->payloads[i].start_slot = req_payload.start_slot;
+ }
/* work out what is required to happen with this payload */
- if (mgr->payloads[i].start_slot != req_payload.start_slot ||
- mgr->payloads[i].num_slots != req_payload.num_slots) {
+ if (mgr->payloads[i].num_slots != req_payload.num_slots) {
/* need to push an update for this payload */
if (req_payload.num_slots) {
- drm_dp_create_payload_step1(mgr, i + 1, &req_payload);
+ drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
mgr->payloads[i].num_slots = req_payload.num_slots;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
- drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]);
+ drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
req_payload.payload_state = mgr->payloads[i].payload_state;
- } else
- req_payload.payload_state = 0;
-
- mgr->payloads[i].start_slot = req_payload.start_slot;
+ mgr->payloads[i].start_slot = 0;
+ }
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
}
+
+ for (i = 0; i < mgr->max_payloads; i++) {
+ if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
+ DRM_DEBUG_KMS("removing payload %d\n", i);
+ for (j = i; j < mgr->max_payloads - 1; j++) {
+ memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
+ mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
+ if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
+ set_bit(j + 1, &mgr->payload_mask);
+ } else {
+ clear_bit(j + 1, &mgr->payload_mask);
+ }
+ }
+ memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
+ mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
+ clear_bit(mgr->max_payloads, &mgr->payload_mask);
+
+ }
+ }
mutex_unlock(&mgr->payload_lock);
return 0;
@@ -1657,9 +1693,9 @@ int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
- ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]);
+ ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
- ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]);
+ ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
}
if (ret) {
mutex_unlock(&mgr->payload_lock);
@@ -1861,6 +1897,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
+ mgr->vcpi_mask = 0;
}
out_unlock:
@@ -2475,7 +2512,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
mutex_unlock(&mgr->lock);
mutex_lock(&mgr->payload_lock);
- seq_printf(m, "vcpi: %lx\n", mgr->payload_mask);
+ seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);
for (i = 0; i < mgr->max_payloads; i++) {
if (mgr->proposed_vcpis[i]) {
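
Editor's note: the drm_dp_mst_topology.c hunks above split ID allocation into two bitmasks, one for payload-table slots and one for 1-based VCPI ids. A minimal standalone sketch of that bitmask-allocator pattern follows (plain C with a hypothetical first_zero_bit() helper standing in for the kernel's find_first_zero_bit(); not the kernel API itself):

#include <stdio.h>

/* Toy stand-in for find_first_zero_bit(); the "mask" is one unsigned long,
 * just like mgr->payload_mask and mgr->vcpi_mask. */
static int first_zero_bit(unsigned long mask, int max_bits)
{
    for (int i = 0; i < max_bits; i++)
        if (!(mask & (1UL << i)))
            return i;
    return max_bits;            /* no free bit, like ret > max_payloads */
}

int main(void)
{
    unsigned long payload_mask = 0, vcpi_mask = 0;
    int max_payloads = 4;

    payload_mask |= 1UL << 0;   /* bit 0 reserved, mirroring set_bit(0, ...) */

    int ret = first_zero_bit(payload_mask, max_payloads + 1);
    int vcpi_ret = first_zero_bit(vcpi_mask, max_payloads + 1);
    if (ret > max_payloads || vcpi_ret > max_payloads)
        return 1;               /* out of ids */

    payload_mask |= 1UL << ret;
    vcpi_mask |= 1UL << vcpi_ret;

    /* VCPI ids are 1-based, hence the patch storing vcpi_ret + 1 */
    printf("payload slot %d, vcpi id %d\n", ret, vcpi_ret + 1);
    return 0;
}
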
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8889f8ec50ab..bc3da32d4585 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -595,7 +595,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
goto err_ht;
}
- if (driver->driver_features & DRIVER_GEM) {
+ if (drm_core_check_feature(dev, DRIVER_GEM)) {
ret = drm_gem_init(dev);
if (ret) {
DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
@@ -625,7 +625,7 @@ static void drm_dev_release(struct kref *ref)
{
struct drm_device *dev = container_of(ref, struct drm_device, ref);
- if (dev->driver->driver_features & DRIVER_GEM)
+ if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_destroy(dev);
drm_legacy_ctxbitmap_cleanup(dev);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 1bdbfd0e0033..3bf999134bcc 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -841,13 +841,13 @@ static const struct drm_display_mode edid_cea_modes[] = {
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 45 - 720(1440)x576i@100Hz */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_DBLCLK),
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3e6694633f42..ed7bc68f7e87 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -171,7 +171,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
- if (dev->driver->driver_features & DRIVER_GEM)
+ if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_open(dev, priv);
if (drm_core_check_feature(dev, DRIVER_PRIME))
@@ -256,7 +256,7 @@ out_close:
out_prime_destroy:
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_prime_destroy_file_private(&priv->prime);
- if (dev->driver->driver_features & DRIVER_GEM)
+ if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, priv);
put_pid(priv->pid);
kfree(priv);
@@ -408,10 +408,10 @@ int drm_release(struct inode *inode, struct file *filp)
drm_events_release(file_priv);
- if (dev->driver->driver_features & DRIVER_MODESET)
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_fb_release(file_priv);
- if (dev->driver->driver_features & DRIVER_GEM)
+ if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, file_priv);
drm_legacy_ctxbitmap_flush(dev, file_priv);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index cd45e45e2cce..f6ca51259fa3 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -581,7 +581,7 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_gem_close *args = data;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
+ if (!drm_core_check_feature(dev, DRIVER_GEM))
return -ENODEV;
ret = drm_gem_handle_delete(file_priv, args->handle);
@@ -608,7 +608,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
+ if (!drm_core_check_feature(dev, DRIVER_GEM))
return -ENODEV;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -661,7 +661,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
int ret;
u32 handle;
- if (!(dev->driver->driver_features & DRIVER_GEM))
+ if (!drm_core_check_feature(dev, DRIVER_GEM))
return -ENODEV;
mutex_lock(&dev->object_name_lock);
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 8749fc06570e..474e4d12a2d8 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -35,7 +35,7 @@
* of extra utility/tracking out of our acquire-ctx. This is provided
* by drm_modeset_lock / drm_modeset_acquire_ctx.
*
- * For basic principles of ww_mutex, see: Documentation/ww-mutex-design.txt
+ * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
*
* The basic usage pattern is to:
*
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index c45856bcc8b9..593b657d3e59 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -709,11 +709,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(ring));
- ret = init_hash_table(ring, cmd_tables, cmd_table_count);
- if (ret) {
- DRM_ERROR("CMD: cmd_parser_init failed!\n");
- fini_hash_table(ring);
- return ret;
+ if (hash_empty(ring->cmd_hash)) {
+ ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+ if (ret) {
+ DRM_ERROR("CMD: cmd_parser_init failed!\n");
+ fini_hash_table(ring);
+ return ret;
+ }
}
ring->needs_cmd_parser = true;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2cbc85f3b237..063b44817e08 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3826,7 +3826,6 @@ i915_drop_caches_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj, *next;
int ret;
DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -3846,36 +3845,11 @@ i915_drop_caches_set(void *data, u64 val)
if (val & (DROP_RETIRE | DROP_ACTIVE))
i915_gem_retire_requests(dev);
- if (val & DROP_BOUND) {
- list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
- global_list) {
- struct i915_vma *vma, *v;
+ if (val & DROP_BOUND)
+ i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
- ret = 0;
- drm_gem_object_reference(&obj->base);
- list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) {
- if (vma->pin_count)
- continue;
-
- ret = i915_vma_unbind(vma);
- if (ret)
- break;
- }
- drm_gem_object_unreference(&obj->base);
- if (ret)
- goto unlock;
- }
- }
-
- if (val & DROP_UNBOUND) {
- list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
- global_list)
- if (obj->pages_pin_count == 0) {
- ret = i915_gem_object_put_pages(obj);
- if (ret)
- goto unlock;
- }
- }
+ if (val & DROP_UNBOUND)
+ i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
unlock:
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3870c7359a16..055d5e7fbf12 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -871,8 +871,6 @@ int i915_reset(struct drm_device *dev)
*/
if (INTEL_INFO(dev)->gen > 5)
intel_reset_gt_powersave(dev);
-
- intel_hpd_init(dev);
} else {
mutex_unlock(&dev->struct_mutex);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 19c0dd8e255e..16a6f6d187a1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -946,23 +946,6 @@ struct intel_rps_ei {
u32 media_c0;
};
-struct intel_rps_bdw_cal {
- u32 it_threshold_pct; /* interrupt, in percentage */
- u32 eval_interval; /* evaluation interval, in us */
- u32 last_ts;
- u32 last_c0;
- bool is_up;
-};
-
-struct intel_rps_bdw_turbo {
- struct intel_rps_bdw_cal up;
- struct intel_rps_bdw_cal down;
- struct timer_list flip_timer;
- u32 timeout;
- atomic_t flip_received;
- struct work_struct work_max_freq;
-};
-
struct intel_gen6_power_mgmt {
/* work and pm_iir are protected by dev_priv->irq_lock */
struct work_struct work;
@@ -996,9 +979,6 @@ struct intel_gen6_power_mgmt {
bool enabled;
struct delayed_work delayed_resume_work;
- bool is_bdw_sw_turbo; /* Switch of BDW software turbo */
- struct intel_rps_bdw_turbo sw_turbo; /* Calculate RP interrupt timing */
-
/* manual wa residency calculations */
struct intel_rps_ei up_ei, down_ei;
@@ -2369,6 +2349,12 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+ long target,
+ unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2823,8 +2809,6 @@ extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void bdw_software_turbo(struct drm_device *dev);
-extern void gen8_flip_interrupt(struct drm_device *dev);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4ca3a6dcf10b..28f91df2604d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -60,7 +60,6 @@ static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
static int i915_gem_shrinker_oom(struct notifier_block *nb,
unsigned long event,
void *ptr);
-static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1741,7 +1740,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
* offsets on purgeable objects by truncating it and marking it purged,
* which prevents userspace from ever using that object again.
*/
- i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+ i915_gem_shrink(dev_priv,
+ obj->base.size >> PAGE_SHIFT,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_PURGEABLE);
ret = drm_gem_create_mmap_offset(&obj->base);
if (ret != -ENOSPC)
goto out;
@@ -1938,12 +1941,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
return 0;
}
-static unsigned long
-__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
- bool purgeable_only)
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+ long target, unsigned flags)
{
- struct list_head still_in_list;
- struct drm_i915_gem_object *obj;
+ const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
unsigned long count = 0;
/*
@@ -1965,62 +1967,68 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
* dev->struct_mutex and so we won't ever be able to observe an
* object on the bound_list with a reference count equals 0.
*/
- INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
- obj = list_first_entry(&dev_priv->mm.unbound_list,
- typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_in_list);
+ if (flags & I915_SHRINK_UNBOUND) {
+ struct list_head still_in_list;
- if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
- continue;
+ INIT_LIST_HEAD(&still_in_list);
+ while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+ struct drm_i915_gem_object *obj;
- drm_gem_object_reference(&obj->base);
+ obj = list_first_entry(&dev_priv->mm.unbound_list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_in_list);
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
+ if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ continue;
+
+ drm_gem_object_reference(&obj->base);
- drm_gem_object_unreference(&obj->base);
+ if (i915_gem_object_put_pages(obj) == 0)
+ count += obj->base.size >> PAGE_SHIFT;
+
+ drm_gem_object_unreference(&obj->base);
+ }
+ list_splice(&still_in_list, &dev_priv->mm.unbound_list);
}
- list_splice(&still_in_list, &dev_priv->mm.unbound_list);
- INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
- struct i915_vma *vma, *v;
+ if (flags & I915_SHRINK_BOUND) {
+ struct list_head still_in_list;
- obj = list_first_entry(&dev_priv->mm.bound_list,
- typeof(*obj), global_list);
- list_move_tail(&obj->global_list, &still_in_list);
+ INIT_LIST_HEAD(&still_in_list);
+ while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma, *v;
- if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
- continue;
+ obj = list_first_entry(&dev_priv->mm.bound_list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_in_list);
- drm_gem_object_reference(&obj->base);
+ if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ continue;
- list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
- if (i915_vma_unbind(vma))
- break;
+ drm_gem_object_reference(&obj->base);
- if (i915_gem_object_put_pages(obj) == 0)
- count += obj->base.size >> PAGE_SHIFT;
+ list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+ if (i915_vma_unbind(vma))
+ break;
+
+ if (i915_gem_object_put_pages(obj) == 0)
+ count += obj->base.size >> PAGE_SHIFT;
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference(&obj->base);
+ }
+ list_splice(&still_in_list, &dev_priv->mm.bound_list);
}
- list_splice(&still_in_list, &dev_priv->mm.bound_list);
return count;
}
static unsigned long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
-{
- return __i915_gem_shrink(dev_priv, target, true);
-}
-
-static unsigned long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
i915_gem_evict_everything(dev_priv->dev);
- return __i915_gem_shrink(dev_priv, LONG_MAX, false);
+ return i915_gem_shrink(dev_priv, LONG_MAX,
+ I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
}
static int
@@ -2067,7 +2075,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
for (i = 0; i < page_count; i++) {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (IS_ERR(page)) {
- i915_gem_purge(dev_priv, page_count);
+ i915_gem_shrink(dev_priv,
+ page_count,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_PURGEABLE);
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
}
if (IS_ERR(page)) {
@@ -2944,6 +2956,9 @@ int i915_vma_unbind(struct i915_vma *vma)
* cause memory corruption through use-after-free.
*/
+ /* Throw away the active reference before moving to the unbound list */
+ i915_gem_object_retire(obj);
+
if (i915_is_ggtt(vma->vm)) {
i915_gem_object_finish_gtt(obj);
@@ -3336,17 +3351,20 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
return 0;
}
-static bool i915_gem_valid_gtt_space(struct drm_device *dev,
- struct drm_mm_node *gtt_space,
+static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
unsigned long cache_level)
{
+ struct drm_mm_node *gtt_space = &vma->node;
struct drm_mm_node *other;
- /* On non-LLC machines we have to be careful when putting differing
- * types of snoopable memory together to avoid the prefetcher
- * crossing memory domains and dying.
+ /*
+ * On some machines we have to be careful when putting differing types
+ * of snoopable memory together to avoid the prefetcher crossing memory
+ * domains and dying. During vm initialisation, we decide whether or not
+ * these constraints apply and set the drm_mm.color_adjust
+ * appropriately.
*/
- if (HAS_LLC(dev))
+ if (vma->vm->mm.color_adjust == NULL)
return true;
if (!drm_mm_node_allocated(gtt_space))
@@ -3484,8 +3502,7 @@ search_free:
goto err_free_vma;
}
- if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
- obj->cache_level))) {
+ if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
ret = -EINVAL;
goto err_remove_node;
}
@@ -3695,7 +3712,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
- if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+ if (!i915_gem_valid_gtt_space(vma, cache_level)) {
ret = i915_vma_unbind(vma);
if (ret)
return ret;
@@ -5261,11 +5278,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
if (!i915_gem_shrinker_lock(dev, &unlock))
return SHRINK_STOP;
- freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+ freed = i915_gem_shrink(dev_priv,
+ sc->nr_to_scan,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_PURGEABLE);
if (freed < sc->nr_to_scan)
- freed += __i915_gem_shrink(dev_priv,
- sc->nr_to_scan - freed,
- false);
+ freed += i915_gem_shrink(dev_priv,
+ sc->nr_to_scan - freed,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND);
if (unlock)
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index bbf4b12d842e..886ff2ee7a28 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -243,7 +243,7 @@ int
i915_gem_evict_everything(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *vm;
+ struct i915_address_space *vm, *v;
bool lists_empty = true;
int ret;
@@ -270,7 +270,7 @@ i915_gem_evict_everything(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
- list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
WARN_ON(i915_gem_evict_vm(vm, false));
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6f410cfb0510..b672b843fd5e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1273,6 +1273,16 @@ void i915_check_and_clear_faults(struct drm_device *dev)
POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
}
+static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_INFO(dev_priv->dev)->gen < 6) {
+ intel_gtt_chipset_flush();
+ } else {
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+ }
+}
+
void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1289,6 +1299,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
dev_priv->gtt.base.start,
dev_priv->gtt.base.total,
true);
+
+ i915_ggtt_flush(dev_priv);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -1341,7 +1353,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
}
- i915_gem_chipset_flush(dev);
+ i915_ggtt_flush(dev_priv);
}
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 21c025a209c0..85fda6b803e4 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -289,6 +289,7 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
int bios_reserved = 0;
#ifdef CONFIG_INTEL_IOMMU
@@ -308,8 +309,16 @@ int i915_gem_init_stolen(struct drm_device *dev)
DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
- if (IS_VALLEYVIEW(dev))
- bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
+ if (INTEL_INFO(dev)->gen >= 8) {
+ tmp = I915_READ(GEN7_BIOS_RESERVED);
+ tmp >>= GEN8_BIOS_RESERVED_SHIFT;
+ tmp &= GEN8_BIOS_RESERVED_MASK;
+ bios_reserved = (1024*1024) << tmp;
+ } else if (IS_GEN7(dev)) {
+ tmp = I915_READ(GEN7_BIOS_RESERVED);
+ bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
+ 256*1024 : 1024*1024;
+ }
if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
return 0;
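
Editor's note: the i915_gem_stolen.c hunk above replaces the hard-coded VLV value with a decode of the BIOS-reserved field. A small standalone sketch of that decode, using the GEN7_BIOS_RESERVED shift/mask values the patch adds to i915_reg.h (register values below are made up purely to exercise the math):

#include <stdio.h>

#define GEN7_BIOS_RESERVED_256K  (1 << 5)
#define GEN8_BIOS_RESERVED_SHIFT 7
#define GEN8_BIOS_RESERVED_MASK  0x3

static unsigned int bios_reserved_bytes(unsigned int reg, int gen)
{
    if (gen >= 8) {
        /* gen8+: a two-bit field scales the reservation up from 1M */
        unsigned int field = (reg >> GEN8_BIOS_RESERVED_SHIFT) &
                             GEN8_BIOS_RESERVED_MASK;
        return (1024 * 1024) << field;
    }
    /* gen7: a single bit selects 256K instead of the default 1M */
    return (reg & GEN7_BIOS_RESERVED_256K) ? 256 * 1024 : 1024 * 1024;
}

int main(void)
{
    printf("gen7, bit 5 set -> %u bytes\n", bios_reserved_bytes(1 << 5, 7));
    printf("gen8, field = 2 -> %u bytes\n", bios_reserved_bytes(2 << 7, 8));
    return 0;
}
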
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d38413997379..d182058383a9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -293,15 +293,23 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
- if (mm->mn == NULL) {
- down_write(&mm->mm->mmap_sem);
- mutex_lock(&to_i915(mm->dev)->mm_lock);
- if (mm->mn == NULL)
- mm->mn = i915_mmu_notifier_create(mm->mm);
- mutex_unlock(&to_i915(mm->dev)->mm_lock);
- up_write(&mm->mm->mmap_sem);
+ struct i915_mmu_notifier *mn = mm->mn;
+
+ mn = mm->mn;
+ if (mn)
+ return mn;
+
+ down_write(&mm->mm->mmap_sem);
+ mutex_lock(&to_i915(mm->dev)->mm_lock);
+ if ((mn = mm->mn) == NULL) {
+ mn = i915_mmu_notifier_create(mm->mm);
+ if (!IS_ERR(mn))
+ mm->mn = mn;
}
- return mm->mn;
+ mutex_unlock(&to_i915(mm->dev)->mm_lock);
+ up_write(&mm->mm->mmap_sem);
+
+ return mn;
}
static int
@@ -681,16 +689,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
- struct scatterlist *sg;
- int i;
+ struct sg_page_iter sg_iter;
BUG_ON(obj->userptr.work != NULL);
if (obj->madv != I915_MADV_WILLNEED)
obj->dirty = 0;
- for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
- struct page *page = sg_page(sg);
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
if (obj->dirty)
set_page_dirty(page);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c96ddc953531..f66392b6e287 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1711,7 +1711,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
-static int ilk_port_to_hotplug_shift(enum port port)
+static int pch_port_to_hotplug_shift(enum port port)
{
switch (port) {
case PORT_A:
@@ -1727,7 +1727,7 @@ static int ilk_port_to_hotplug_shift(enum port port)
}
}
-static int g4x_port_to_hotplug_shift(enum port port)
+static int i915_port_to_hotplug_shift(enum port port)
{
switch (port) {
case PORT_A:
@@ -1785,12 +1785,12 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
if (port && dev_priv->hpd_irq_port[port]) {
bool long_hpd;
- if (IS_G4X(dev)) {
- dig_shift = g4x_port_to_hotplug_shift(port);
- long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
- } else {
- dig_shift = ilk_port_to_hotplug_shift(port);
+ if (HAS_PCH_SPLIT(dev)) {
+ dig_shift = pch_port_to_hotplug_shift(port);
long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
+ } else {
+ dig_shift = i915_port_to_hotplug_shift(port);
+ long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
}
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
@@ -1979,27 +1979,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
res1, res2);
}
-void gen8_flip_interrupt(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->rps.is_bdw_sw_turbo)
- return;
-
- if(atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
- mod_timer(&dev_priv->rps.sw_turbo.flip_timer,
- usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies);
- }
- else {
- dev_priv->rps.sw_turbo.flip_timer.expires =
- usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
- add_timer(&dev_priv->rps.sw_turbo.flip_timer);
- atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
- }
-
- bdw_software_turbo(dev);
-}
-
/* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */
@@ -3479,12 +3458,13 @@ static void gen8_irq_reset(struct drm_device *dev)
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
+ uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
- ~dev_priv->de_irq_mask[PIPE_B]);
+ ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
- ~dev_priv->de_irq_mask[PIPE_C]);
+ ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 139f490d464d..c91cb2033cc5 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -67,12 +67,12 @@ module_param_named(powersave, i915.powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
-module_param_named(semaphores, i915.semaphores, int, 0400);
+module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync "
"(default: -1 (use per-chip defaults))");
-module_param_named(enable_rc6, i915.enable_rc6, int, 0400);
+module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400);
MODULE_PARM_DESC(enable_rc6,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(enable_rc6,
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
-module_param_named(enable_fbc, i915.enable_fbc, int, 0600);
+module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
@@ -114,7 +114,7 @@ MODULE_PARM_DESC(enable_hangcheck,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400);
+module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
MODULE_PARM_DESC(enable_ppgtt,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b65bdfc23ccb..c01e5f31430e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -143,6 +143,14 @@
#define GAB_CTL 0x24000
#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
+#define GEN7_BIOS_RESERVED 0x1082C0
+#define GEN7_BIOS_RESERVED_1M (0 << 5)
+#define GEN7_BIOS_RESERVED_256K (1 << 5)
+#define GEN8_BIOS_RESERVED_SHIFT 7
+#define GEN7_BIOS_RESERVED_MASK 0x1
+#define GEN8_BIOS_RESERVED_MASK 0x3
+
+
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@@ -2435,6 +2443,7 @@ enum punit_power_well {
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
#define _VSYNCSHIFT_A 0x60028
+#define _PIPE_MULT_A 0x6002c
/* Pipe B timing regs */
#define _HTOTAL_B 0x61000
@@ -2446,6 +2455,7 @@ enum punit_power_well {
#define _PIPEBSRC 0x6101c
#define _BCLRPAT_B 0x61020
#define _VSYNCSHIFT_B 0x61028
+#define _PIPE_MULT_B 0x6102c
#define TRANSCODER_A_OFFSET 0x60000
#define TRANSCODER_B_OFFSET 0x61000
@@ -2466,6 +2476,7 @@ enum punit_power_well {
#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
+#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
/* HSW+ eDP PSR registers */
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
@@ -5577,10 +5588,6 @@ enum punit_power_well {
#define GEN8_UCGCTL6 0x9430
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
-#define TIMESTAMP_CTR 0x44070
-#define FREQ_1_28_US(us) (((us) * 100) >> 7)
-#define MCHBAR_PCU_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5960)
-
#define GEN6_GFXPAUSE 0xA000
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b3e579b4428e..a4bd90f36a03 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -946,7 +946,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
port_name(port));
if (is_dvi && (port == PORT_A || port == PORT_E))
- DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
+ DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
if (!is_dvi && !is_dp && !is_crt)
DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
port_name(port));
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1386086ec245..c9e220963a78 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -73,9 +73,6 @@ static const uint32_t intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
-#define DIV_ROUND_CLOSEST_ULL(ll, d) \
-({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
-
static void intel_increase_pllclock(struct drm_device *dev,
enum pipe pipe);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -1612,6 +1609,18 @@ static void chv_enable_pll(struct intel_crtc *crtc)
mutex_unlock(&dev_priv->dpio_lock);
}
+static int intel_num_dvo_pipes(struct drm_device *dev)
+{
+ struct intel_crtc *crtc;
+ int count = 0;
+
+ for_each_intel_crtc(dev, crtc)
+ count += crtc->active &&
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO);
+
+ return count;
+}
+
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -1628,7 +1637,18 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
if (IS_MOBILE(dev) && !IS_I830(dev))
assert_panel_unlocked(dev_priv, crtc->pipe);
- I915_WRITE(reg, dpll);
+ /* Enable DVO 2x clock on both PLLs if necessary */
+ if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
+ /*
+ * It appears to be important that we don't enable this
+ * for the current pipe before otherwise configuring the
+ * PLL. No idea how this should be handled if multiple
+ * DVO outputs are enabled simultaneosly.
+ */
+ dpll |= DPLL_DVO_2X_MODE;
+ I915_WRITE(DPLL(!crtc->pipe),
+ I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
+ }
/* Wait for the clocks to stabilize. */
POSTING_READ(reg);
@@ -1667,8 +1687,22 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
*
* Note! This is for pre-ILK only.
*/
-static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void i9xx_disable_pll(struct intel_crtc *crtc)
{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = crtc->pipe;
+
+ /* Disable DVO 2x clock on both PLLs if necessary */
+ if (IS_I830(dev) &&
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) &&
+ intel_num_dvo_pipes(dev) == 1) {
+ I915_WRITE(DPLL(PIPE_B),
+ I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
+ I915_WRITE(DPLL(PIPE_A),
+ I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
+ }
+
/* Don't disable pipe or pipe PLLs if needed */
if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
@@ -4185,6 +4219,11 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_set_pipe_timings(intel_crtc);
+ if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
+ I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
+ intel_crtc->config.pixel_multiplier - 1);
+ }
+
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
&intel_crtc->config.fdi_m_n, NULL);
@@ -4941,7 +4980,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
else if (IS_VALLEYVIEW(dev))
vlv_disable_pll(dev_priv, pipe);
else
- i9xx_disable_pll(dev_priv, pipe);
+ i9xx_disable_pll(intel_crtc);
}
if (!IS_GEN2(dev))
@@ -5945,7 +5984,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
@@ -6451,6 +6490,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
if (!IS_VALLEYVIEW(dev)) {
+ /*
+ * DPLL_DVO_2X_MODE must be enabled for both DPLLs
+ * on 830. Filter it out here so that we don't
+ * report errors due to that.
+ */
+ if (IS_I830(dev))
+ pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
+
pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
} else {
@@ -7845,7 +7892,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
(I915_READ(IPS_CTL) & IPS_ENABLE);
- pipe_config->pixel_multiplier = 1;
+ if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
+ pipe_config->pixel_multiplier =
+ I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+ } else {
+ pipe_config->pixel_multiplier = 1;
+ }
return true;
}
@@ -9881,9 +9933,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
unsigned long flags;
int ret;
- //trigger software GT busyness calculation
- gen8_flip_interrupt(dev);
-
/*
* drm_mode_page_flip_ioctl() should already catch this, but double
* check to be safe. In the future we may enable pageflipping from
@@ -10039,8 +10088,11 @@ free_work:
out_hang:
intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
- if (ret == 0 && event)
+ if (ret == 0 && event) {
+ spin_lock_irqsave(&dev->event_lock, flags);
drm_send_vblank_event(dev, pipe, event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
}
return ret;
}
@@ -12302,27 +12354,36 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
- if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
+ /*
+ * The DP_DETECTED bit is the latched state of the DDC
+ * SDA pin at boot. However since eDP doesn't require DDC
+ * (no way to plug in a DP->HDMI dongle) the DDC pins for
+ * eDP ports may have been muxed to an alternate function.
+ * Thus we can't rely on the DP_DETECTED bit alone to detect
+ * eDP ports. Consult the VBT as well as DP_DETECTED to
+ * detect eDP ports.
+ */
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
PORT_B);
- if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
- intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
- }
+ if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
+ intel_dp_is_edp(dev, PORT_B))
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
- if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
- if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
- intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
- }
+ if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
+ intel_dp_is_edp(dev, PORT_C))
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
if (IS_CHERRYVIEW(dev)) {
- if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
+ if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
PORT_D);
- if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
- intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
- }
+ /* eDP not supported on port D, so don't check VBT */
+ if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
}
intel_dsi_init(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2a26774ddb68..f6a3fdd5589e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1068,23 +1068,15 @@ intel_dp_compute_config(struct intel_encoder *encoder,
bpp = dev_priv->vbt.edp_bpp;
}
- if (IS_BROADWELL(dev)) {
- /* Yes, it's an ugly hack. */
- min_lane_count = max_lane_count;
- DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
- min_lane_count);
- } else if (dev_priv->vbt.edp_lanes) {
- min_lane_count = min(dev_priv->vbt.edp_lanes,
- max_lane_count);
- DRM_DEBUG_KMS("using min %u lanes per VBT\n",
- min_lane_count);
- }
-
- if (dev_priv->vbt.edp_rate) {
- min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
- DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
- bws[min_clock]);
- }
+ /*
+ * Use the maximum clock and number of lanes the eDP panel
+ * advertizes being capable of. The panels are generally
+ * designed to support only a single clock and lane
+ * configuration, and typically these values correspond to the
+ * native resolution of the panel.
+ */
+ min_lane_count = max_lane_count;
+ min_clock = max_clock;
}
for (; bpp >= 6*3; bpp -= 2*3) {
@@ -1915,6 +1907,10 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= flags;
+ if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
+ tmp & DP_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
+
pipe_config->has_dp_encoder = true;
intel_dp_get_m_n(crtc, pipe_config);
@@ -3732,7 +3728,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
intel_dp->use_tps3 = true;
- DRM_DEBUG_KMS("Displayport TPS3 supported");
+ DRM_DEBUG_KMS("Displayport TPS3 supported\n");
} else
intel_dp->use_tps3 = false;
@@ -3808,21 +3804,21 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
u8 buf[1];
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
- return -EAGAIN;
+ return -EIO;
if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
return -ENOTTY;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
DP_TEST_SINK_START) < 0)
- return -EAGAIN;
+ return -EIO;
/* Wait 2 vblanks to be sure we will have the correct CRC value */
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_wait_for_vblank(dev, intel_crtc->pipe);
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
- return -EAGAIN;
+ return -EIO;
drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
return 0;
@@ -4395,7 +4391,7 @@ intel_dp_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- intel_dp_unset_edid(intel_attached_dp(connector));
+ kfree(intel_connector->detect_edid);
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 07ce04683c30..ba715229a540 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -35,6 +35,9 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_mst_helper.h>
+#define DIV_ROUND_CLOSEST_ULL(ll, d) \
+({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+
/**
* _wait_for - magic (register) wait macro
*
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c5861736b4b0..29ec1535992d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -712,7 +712,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp, flags = 0;
int dotclock;
@@ -731,9 +732,13 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
- if (tmp & HDMI_MODE_SELECT_HDMI)
+ if (tmp & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
+ if (!HAS_PCH_SPLIT(dev) &&
+ tmp & HDMI_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
+
pipe_config->adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
@@ -1501,7 +1506,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- intel_hdmi_unset_edid(connector);
+ kfree(to_intel_connector(connector)->detect_edid);
drm_connector_cleanup(connector);
kfree(connector);
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index bd1b28d99920..bafd38b5703e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -300,8 +300,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
* Instead, we do the runtime_pm_get/put when creating/destroying requests.
*/
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (dev_priv->uncore.forcewake_count++ == 0)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+ if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if (dev_priv->uncore.fw_rendercount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_RENDER);
+ if (dev_priv->uncore.fw_mediacount++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_MEDIA);
+ } else {
+ if (dev_priv->uncore.forcewake_count++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ FORCEWAKE_ALL);
+ }
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
I915_WRITE(RING_ELSP(ring), desc[1]);
@@ -315,8 +325,19 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
/* Release Force Wakeup (see the big comment above). */
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- if (--dev_priv->uncore.forcewake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+ if (IS_CHERRYVIEW(dev_priv->dev)) {
+ if (--dev_priv->uncore.fw_rendercount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_RENDER);
+ if (--dev_priv->uncore.fw_mediacount == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_MEDIA);
+ } else {
+ if (--dev_priv->uncore.forcewake_count == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv,
+ FORCEWAKE_ALL);
+ }
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index ca52ad2ae7d1..d8de1d5140a7 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -396,6 +396,16 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
return -EINVAL;
}
+/*
+ * If the vendor backlight interface is not in use and ACPI backlight interface
+ * is broken, do not bother processing backlight change requests from firmware.
+ */
+static bool should_ignore_backlight_request(void)
+{
+ return acpi_video_backlight_support() &&
+ !acpi_video_verify_backlight_support();
+}
+
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -404,11 +414,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
- /*
- * If the acpi_video interface is not supposed to be used, don't
- * bother processing backlight level change requests from firmware.
- */
- if (!acpi_video_verify_backlight_support()) {
+ if (should_ignore_backlight_request()) {
DRM_DEBUG_KMS("opregion backlight request ignored\n");
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 18784470a760..0e018cb49147 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -419,9 +419,8 @@ static uint32_t scale(uint32_t source_val,
source_val = clamp(source_val, source_min, source_max);
/* avoid overflows */
- target_val = (uint64_t)(source_val - source_min) *
- (target_max - target_min);
- do_div(target_val, source_max - source_min);
+ target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) *
+ (target_max - target_min), source_max - source_min);
target_val += target_min;
return target_val;
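
The scale() change replaces the truncating do_div() with round-to-nearest division. A self-contained sketch of the same linear remapping, with DIV_ROUND_CLOSEST_ULL-style rounding open-coded (function and example ranges are illustrative; assumes smax > smin):

#include <stdint.h>
#include <stdio.h>

/* Map v from [smin, smax] onto [tmin, tmax], rounding to nearest. */
static uint32_t scale(uint32_t v, uint32_t smin, uint32_t smax,
                      uint32_t tmin, uint32_t tmax)
{
        uint64_t num;

        if (v < smin) v = smin;
        if (v > smax) v = smax;

        num = (uint64_t)(v - smin) * (tmax - tmin);
        /* round-to-nearest instead of truncation */
        return tmin + (uint32_t)((num + (smax - smin) / 2) / (smax - smin));
}

int main(void)
{
        /* 127 on a 0..255 scale mapped to 0..100: rounds to 50, truncation gives 49 */
        printf("%u\n", scale(127, 0, 255, 0, 100));
        return 0;
}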
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 45f71e6dc544..c27b6140bfd1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1070,6 +1070,17 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
wm_size = wm->max_wm;
if (wm_size <= 0)
wm_size = wm->default_wm;
+
+ /*
+ * Bspec seems to indicate that the value shouldn't be lower than
+ * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
+ * Lets go for 8 which is the burst size since certain platforms
+ * already use a hardcoded 8 (which is what the spec says should be
+ * done).
+ */
+ if (wm_size <= 8)
+ wm_size = 8;
+
return wm_size;
}
@@ -2274,6 +2285,7 @@ int ilk_wm_max_level(const struct drm_device *dev)
else
return 2;
}
+
static void intel_print_wm_latency(struct drm_device *dev,
const char *name,
const uint16_t wm[5])
@@ -3242,9 +3254,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
int new_power;
- if (dev_priv->rps.is_bdw_sw_turbo)
- return;
-
new_power = dev_priv->rps.power;
switch (dev_priv->rps.power) {
case LOW_POWER:
@@ -3452,11 +3461,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
else if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
- else if (!dev_priv->rps.is_bdw_sw_turbo
- || atomic_read(&dev_priv->rps.sw_turbo.flip_received)){
+ else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
- }
-
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3470,11 +3476,8 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
if (dev_priv->rps.enabled) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
- else if (!dev_priv->rps.is_bdw_sw_turbo
- || atomic_read(&dev_priv->rps.sw_turbo.flip_received)){
+ else
gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
- }
-
dev_priv->rps.last_adj = 0;
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3488,17 +3491,18 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
WARN_ON(val > dev_priv->rps.max_freq_softlimit);
WARN_ON(val < dev_priv->rps.min_freq_softlimit);
- DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
- vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
- dev_priv->rps.cur_freq,
- vlv_gpu_freq(dev_priv, val), val);
-
if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
"Odd GPU freq value\n"))
val &= ~1;
- if (val != dev_priv->rps.cur_freq)
+ if (val != dev_priv->rps.cur_freq) {
+ DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ dev_priv->rps.cur_freq,
+ vlv_gpu_freq(dev_priv, val), val);
+
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+ }
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
@@ -3509,26 +3513,21 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
static void gen8_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_BROADWELL(dev) && dev_priv->rps.is_bdw_sw_turbo){
- if (atomic_read(&dev_priv->rps.sw_turbo.flip_received))
- del_timer(&dev_priv->rps.sw_turbo.flip_timer);
- dev_priv-> rps.is_bdw_sw_turbo = false;
- } else {
- I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
- I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
- ~dev_priv->pm_rps_events);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
- * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
- * gen8_enable_rps will clean up. */
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+ I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
+ ~dev_priv->pm_rps_events);
+ /* Complete PM interrupt masking here doesn't race with the rps work
+ * item again unmasking PM interrupts because that is using a different
+ * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
+ * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
+ * gen8_enable_rps will clean up. */
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->rps.pm_iir = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
- I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
- }
+ I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
}
static void gen6_disable_rps_interrupts(struct drm_device *dev)
@@ -3686,111 +3685,13 @@ static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_c
dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
}
-static void bdw_sw_calculate_freq(struct drm_device *dev,
- struct intel_rps_bdw_cal *c, u32 *cur_time, u32 *c0)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u64 busy = 0;
- u32 busyness_pct = 0;
- u32 elapsed_time = 0;
- u16 new_freq = 0;
-
- if (!c || !cur_time || !c0)
- return;
-
- if (0 == c->last_c0)
- goto out;
-
- /* Check Evaluation interval */
- elapsed_time = *cur_time - c->last_ts;
- if (elapsed_time < c->eval_interval)
- return;
-
- mutex_lock(&dev_priv->rps.hw_lock);
-
- /*
- * c0 unit in 32*1.28 usec, elapsed_time unit in 1 usec.
- * Whole busyness_pct calculation should be
- * busy = ((u64)(*c0 - c->last_c0) << 5 << 7) / 100;
- * busyness_pct = (u32)(busy * 100 / elapsed_time);
- * The final formula is to simplify CPU calculation
- */
- busy = (u64)(*c0 - c->last_c0) << 12;
- do_div(busy, elapsed_time);
- busyness_pct = (u32)busy;
-
- if (c->is_up && busyness_pct >= c->it_threshold_pct)
- new_freq = (u16)dev_priv->rps.cur_freq + 3;
- if (!c->is_up && busyness_pct <= c->it_threshold_pct)
- new_freq = (u16)dev_priv->rps.cur_freq - 1;
-
- /* Adjust to new frequency busyness and compare with threshold */
- if (0 != new_freq) {
- if (new_freq > dev_priv->rps.max_freq_softlimit)
- new_freq = dev_priv->rps.max_freq_softlimit;
- else if (new_freq < dev_priv->rps.min_freq_softlimit)
- new_freq = dev_priv->rps.min_freq_softlimit;
-
- gen6_set_rps(dev, new_freq);
- }
-
- mutex_unlock(&dev_priv->rps.hw_lock);
-
-out:
- c->last_c0 = *c0;
- c->last_ts = *cur_time;
-}
-
-static void gen8_set_frequency_RP0(struct work_struct *work)
-{
- struct intel_rps_bdw_turbo *p_bdw_turbo =
- container_of(work, struct intel_rps_bdw_turbo, work_max_freq);
- struct intel_gen6_power_mgmt *p_power_mgmt =
- container_of(p_bdw_turbo, struct intel_gen6_power_mgmt, sw_turbo);
- struct drm_i915_private *dev_priv =
- container_of(p_power_mgmt, struct drm_i915_private, rps);
-
- mutex_lock(&dev_priv->rps.hw_lock);
- gen6_set_rps(dev_priv->dev, dev_priv->rps.rp0_freq);
- mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
-static void flip_active_timeout_handler(unsigned long var)
-{
- struct drm_i915_private *dev_priv = (struct drm_i915_private *) var;
-
- del_timer(&dev_priv->rps.sw_turbo.flip_timer);
- atomic_set(&dev_priv->rps.sw_turbo.flip_received, false);
-
- queue_work(dev_priv->wq, &dev_priv->rps.sw_turbo.work_max_freq);
-}
-
-void bdw_software_turbo(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- u32 current_time = I915_READ(TIMESTAMP_CTR); /* unit in usec */
- u32 current_c0 = I915_READ(MCHBAR_PCU_C0); /* unit in 32*1.28 usec */
-
- bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.up,
- &current_time, &current_c0);
- bdw_sw_calculate_freq(dev, &dev_priv->rps.sw_turbo.down,
- &current_time, &current_c0);
-}
-
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
uint32_t rc6_mask = 0, rp_state_cap;
- uint32_t threshold_up_pct, threshold_down_pct;
- uint32_t ei_up, ei_down; /* up and down evaluation interval */
- u32 rp_ctl_flag;
int unused;
- /* Use software Turbo for BDW */
- dev_priv->rps.is_bdw_sw_turbo = IS_BROADWELL(dev);
-
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
@@ -3834,74 +3735,35 @@ static void gen8_enable_rps(struct drm_device *dev)
HSW_FREQUENCY(dev_priv->rps.rp1_freq));
I915_WRITE(GEN6_RC_VIDEO_FREQ,
HSW_FREQUENCY(dev_priv->rps.rp1_freq));
- ei_up = 84480; /* 84.48ms */
- ei_down = 448000;
- threshold_up_pct = 90; /* x percent busy */
- threshold_down_pct = 70;
-
- if (dev_priv->rps.is_bdw_sw_turbo) {
- dev_priv->rps.sw_turbo.up.it_threshold_pct = threshold_up_pct;
- dev_priv->rps.sw_turbo.up.eval_interval = ei_up;
- dev_priv->rps.sw_turbo.up.is_up = true;
- dev_priv->rps.sw_turbo.up.last_ts = 0;
- dev_priv->rps.sw_turbo.up.last_c0 = 0;
-
- dev_priv->rps.sw_turbo.down.it_threshold_pct = threshold_down_pct;
- dev_priv->rps.sw_turbo.down.eval_interval = ei_down;
- dev_priv->rps.sw_turbo.down.is_up = false;
- dev_priv->rps.sw_turbo.down.last_ts = 0;
- dev_priv->rps.sw_turbo.down.last_c0 = 0;
-
- /* Start the timer to track if flip comes*/
- dev_priv->rps.sw_turbo.timeout = 200*1000; /* in us */
-
- init_timer(&dev_priv->rps.sw_turbo.flip_timer);
- dev_priv->rps.sw_turbo.flip_timer.function = flip_active_timeout_handler;
- dev_priv->rps.sw_turbo.flip_timer.data = (unsigned long) dev_priv;
- dev_priv->rps.sw_turbo.flip_timer.expires =
- usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
- add_timer(&dev_priv->rps.sw_turbo.flip_timer);
- INIT_WORK(&dev_priv->rps.sw_turbo.work_max_freq, gen8_set_frequency_RP0);
-
- atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
- } else {
- /* NB: Docs say 1s, and 1000000 - which aren't equivalent
- * 1 second timeout*/
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, FREQ_1_28_US(1000000));
+ /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
- /* Docs recommend 900MHz, and 300 MHz respectively */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- dev_priv->rps.max_freq_softlimit << 24 |
- dev_priv->rps.min_freq_softlimit << 16);
+ /* Docs recommend 900MHz, and 300 MHz respectively */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ dev_priv->rps.max_freq_softlimit << 24 |
+ dev_priv->rps.min_freq_softlimit << 16);
- I915_WRITE(GEN6_RP_UP_THRESHOLD,
- FREQ_1_28_US(ei_up * threshold_up_pct / 100));
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
- FREQ_1_28_US(ei_down * threshold_down_pct / 100));
- I915_WRITE(GEN6_RP_UP_EI,
- FREQ_1_28_US(ei_up));
- I915_WRITE(GEN6_RP_DOWN_EI,
- FREQ_1_28_US(ei_down));
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
+ I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- }
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
/* 5: Enable RPS */
- rp_ctl_flag = GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG;
- if (!dev_priv->rps.is_bdw_sw_turbo)
- rp_ctl_flag |= GEN6_RP_ENABLE;
-
- I915_WRITE(GEN6_RP_CONTROL, rp_ctl_flag);
-
- /* 6: Ring frequency + overclocking
- * (our driver does this later */
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+ /* 6: Ring frequency + overclocking (our driver does this later */
+
gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
- if (!dev_priv->rps.is_bdw_sw_turbo)
- gen8_enable_rps_interrupts(dev);
+
+ gen8_enable_rps_interrupts(dev);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
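
The hard-coded thresholds being restored encode times in the GT's 1.28 us tick, which is where the /128 divisors above come from. A quick sketch of the conversion (the macro mirrors the FREQ_1_28_US helper seen in the removed lines; the printed values match the constants in the hunk):

#include <stdio.h>

/* Time in microseconds expressed in 1.28 us hardware ticks:
 * us / 1.28 == (us * 100) / 128. */
#define FREQ_1_28_US(us)        (((us) * 100) / 128)

int main(void)
{
        printf("%d\n", FREQ_1_28_US(1000000)); /* 1 s timeout   -> 781250 */
        printf("%d\n", FREQ_1_28_US(76000));   /* 76 ms up EI   -> 59375  */
        printf("%d\n", FREQ_1_28_US(313000));  /* 313 ms down   -> 244531 */
        return 0;
}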
@@ -5375,8 +5237,6 @@ static void intel_gen6_powersave_work(struct work_struct *work)
rps.delayed_resume_work.work);
struct drm_device *dev = dev_priv->dev;
- dev_priv->rps.is_bdw_sw_turbo = false;
-
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_CHERRYVIEW(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 109de2eeb9a8..0a80e419b589 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -707,7 +707,7 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
* update the number of dwords required based on the
* actual number of workarounds applied
*/
- ret = intel_ring_begin(ring, 24);
+ ret = intel_ring_begin(ring, 18);
if (ret)
return ret;
@@ -722,19 +722,8 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
- /*
- * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
- * pre-production hardware
- */
intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
- _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS
- | GEN8_SAMPLER_POWER_BYPASS_DIS));
-
- intel_ring_emit_wa(ring, GEN7_HALF_SLICE_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
-
- intel_ring_emit_wa(ring, COMMON_SLICE_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
+ _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for for a possible hang in the unlikely event a TLB
@@ -1579,7 +1568,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
*/
intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
- intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024);
+ intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
intel_ring_emit(ring, cs_offset);
intel_ring_emit(ring, 4096);
intel_ring_emit(ring, offset);
@@ -2203,8 +2192,9 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
return ret;
intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
- (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+ MI_BATCH_BUFFER_START |
+ (flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
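
The HSW fix above ties the PPGTT bit to the secure flag: a secure batch runs from the global GTT, so only non-secure dispatches get MI_BATCH_PPGTT_HSW together with the non-secure bit. A small sketch of the flag selection (bit encodings here are illustrative stand-ins):

#include <stdint.h>
#include <stdio.h>

#define MI_BATCH_BUFFER_START   (0x31u << 23)  /* illustrative encodings */
#define MI_BATCH_PPGTT_HSW      (1u << 8)
#define MI_BATCH_NON_SECURE_HSW (1u << 13)
#define I915_DISPATCH_SECURE    0x1

static uint32_t hsw_batch_start(unsigned flags)
{
        return MI_BATCH_BUFFER_START |
               (flags & I915_DISPATCH_SECURE ?
                0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
}

int main(void)
{
        printf("secure:     0x%08x\n", hsw_batch_start(I915_DISPATCH_SECURE));
        printf("non-secure: 0x%08x\n", hsw_batch_start(0));
        return 0;
}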
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index a7efbff4dc8f..2df3a937037d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1846,9 +1846,10 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp
const int or = ffs(outp->or) - 1;
const u32 loff = (or * 0x800) + (link * 0x80);
const u16 mask = (outp->sorconf.link << 6) | outp->or;
+ struct dcb_output match;
u8 ver, hdr;
- if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
+ if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index 0a44459844e3..05a278bab247 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -200,7 +200,6 @@ nvc0_bar_init(struct nouveau_object *object)
nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
- nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);
nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
if (priv->bar[0].mem)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index b19a2b3c1081..32f28dc73ef2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -60,6 +60,7 @@ nvc0_fb_init(struct nouveau_object *object)
if (priv->r100c10_page)
nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
+ nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
index e7b7872481ef..2db0977284f8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
@@ -115,6 +115,7 @@ static int
gf100_ltc_init(struct nouveau_object *object)
{
struct nvkm_ltc_priv *priv = (void *)object;
+ u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
int ret;
ret = nvkm_ltc_init(priv);
@@ -124,6 +125,7 @@ gf100_ltc_init(struct nouveau_object *object)
nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
nv_wr32(priv, 0x17e8d4, priv->tag_base);
+ nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
index ea716569745d..b39b5d0eb8f9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
@@ -28,6 +28,7 @@ static int
gk104_ltc_init(struct nouveau_object *object)
{
struct nvkm_ltc_priv *priv = (void *)object;
+ u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
int ret;
ret = nvkm_ltc_init(priv);
@@ -37,6 +38,7 @@ gk104_ltc_init(struct nouveau_object *object)
nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
nv_wr32(priv, 0x17e000, priv->ltc_nr);
nv_wr32(priv, 0x17e8d4, priv->tag_base);
+ nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
index a26bed86f384..89fc4238f50c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
@@ -93,6 +93,7 @@ static int
gm107_ltc_init(struct nouveau_object *object)
{
struct nvkm_ltc_priv *priv = (void *)object;
+ u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
int ret;
ret = nvkm_ltc_init(priv);
@@ -101,6 +102,7 @@ gm107_ltc_init(struct nouveau_object *object)
nv_wr32(priv, 0x17e27c, priv->ltc_nr);
nv_wr32(priv, 0x17e278, priv->tag_base);
+ nv_mask(priv, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
return 0;
}
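
All three LTC init hunks follow the same shape: sample bit 0 of 0x100c80 (cleared when the FB was switched to 128KiB large pages in nvc0_fb_init above) and mirror that choice into the LTC's large-page bit. A tiny model of the logic, with the registers reduced to plain variables:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Stand-ins for the two MMIO registers involved (values illustrative). */
        uint32_t fb_cfg  = 0x00000000; /* 0x100c80: bit0 clear = 128KiB pages */
        uint32_t ltc_cfg = 0x00000000; /* 0x17e8c0 (gf100/gk104), 0x17e264 (gm107) */

        uint32_t lpg128 = !(fb_cfg & 0x00000001);

        /* keep the LTC's large-page bit in sync with the FB setting */
        ltc_cfg = (ltc_cfg & ~0x00000002u) | (lpg128 ? 0x00000002 : 0x00000000);

        printf("lpg128=%u ltc_cfg=0x%08x\n", lpg128, ltc_cfg);
        return 0;
}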
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 279206997e5c..622424692b3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -46,7 +46,6 @@ static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
acpi_handle dhandle;
- acpi_handle other_handle;
acpi_handle rom_handle;
} nouveau_dsm_priv;
@@ -222,10 +221,9 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
if (!dhandle)
return false;
- if (!acpi_has_method(dhandle, "_DSM")) {
- nouveau_dsm_priv.other_handle = dhandle;
+ if (!acpi_has_method(dhandle, "_DSM"))
return false;
- }
+
if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
1 << NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
@@ -301,16 +299,6 @@ static bool nouveau_dsm_detect(void)
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
- /*
- * On some systems hotplug events are generated for the device
- * being switched off when _DSM is executed. They cause ACPI
- * hotplug to trigger and attempt to remove the device from
- * the system, which causes it to break down. Prevent that from
- * happening by setting the no_hotplug flag for the involved
- * ACPI device objects.
- */
- acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
- acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
ret = true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 77c81d6b45ee..fd3dbd59d73e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -285,6 +285,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nouveau_software_chan *swch;
struct nv_dma_v0 args = {};
int ret, i;
+ bool save;
nvif_object_map(chan->object);
@@ -386,7 +387,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
}
/* initialise synchronisation */
- return nouveau_fence(chan->drm)->context_new(chan);
+ save = cli->base.super;
+ cli->base.super = true; /* hack until fencenv50 fixed */
+ ret = nouveau_fence(chan->drm)->context_new(chan);
+ cli->base.super = save;
+ return ret;
}
int
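
The nouveau_channel_init change wraps fence-context creation in a save/raise/restore of the client's super flag. A minimal sketch of that temporary-privilege pattern (types and the callback are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct client { bool super; };

static int context_new(struct client *cli)
{
        printf("context_new: super=%d\n", cli->super);
        return 0;
}

/* Raise the privilege for one call only, restoring the caller's value. */
static int with_super(struct client *cli, int (*fn)(struct client *))
{
        bool save = cli->super;
        int ret;

        cli->super = true;
        ret = fn(cli);
        cli->super = save;
        return ret;
}

int main(void)
{
        struct client cli = { .super = false };
        int ret = with_super(&cli, context_new);
        printf("after: super=%d ret=%d\n", cli.super, ret);
        return 0;
}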
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 334db3c6e40c..a88e6927f571 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -550,14 +550,12 @@ nouveau_display_destroy(struct drm_device *dev)
}
int
-nouveau_display_suspend(struct drm_device *dev)
+nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_crtc *crtc;
nouveau_display_fini(dev);
- NV_INFO(drm, "unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -579,12 +577,13 @@ nouveau_display_suspend(struct drm_device *dev)
}
void
-nouveau_display_repin(struct drm_device *dev)
+nouveau_display_resume(struct drm_device *dev, bool runtime)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_crtc *crtc;
- int ret;
+ int ret, head;
+ /* re-pin fb/cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -606,13 +605,6 @@ nouveau_display_repin(struct drm_device *dev)
if (ret)
NV_ERROR(drm, "Could not pin/map cursor.\n");
}
-}
-
-void
-nouveau_display_resume(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
- int head;
nouveau_display_init(dev);
@@ -627,6 +619,13 @@ nouveau_display_resume(struct drm_device *dev)
for (head = 0; head < dev->mode_config.num_crtc; head++)
drm_vblank_on(dev, head);
+ /* This should ensure we don't hit a locking problem when someone
+ * wakes us up via a connector. We should never go into suspend
+ * while the display is on anyways.
+ */
+ if (runtime)
+ return;
+
drm_helper_resume_force_mode(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 88ca177cb1c7..be3d5947c6be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -63,9 +63,8 @@ int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev);
void nouveau_display_fini(struct drm_device *dev);
-int nouveau_display_suspend(struct drm_device *dev);
-void nouveau_display_repin(struct drm_device *dev);
-void nouveau_display_resume(struct drm_device *dev);
+int nouveau_display_suspend(struct drm_device *dev, bool runtime);
+void nouveau_display_resume(struct drm_device *dev, bool runtime);
int nouveau_display_vblank_enable(struct drm_device *, int);
void nouveau_display_vblank_disable(struct drm_device *, int);
int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 244d78fc0cb5..57238076049f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -550,9 +550,11 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
struct nouveau_cli *cli;
int ret;
- if (dev->mode_config.num_crtc && !runtime) {
+ if (dev->mode_config.num_crtc) {
+ NV_INFO(drm, "suspending console...\n");
+ nouveau_fbcon_set_suspend(dev, 1);
NV_INFO(drm, "suspending display...\n");
- ret = nouveau_display_suspend(dev);
+ ret = nouveau_display_suspend(dev, runtime);
if (ret)
return ret;
}
@@ -606,7 +608,7 @@ fail_client:
fail_display:
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "resuming display...\n");
- nouveau_display_resume(dev);
+ nouveau_display_resume(dev, runtime);
}
return ret;
}
@@ -621,21 +623,19 @@ int nouveau_pmops_suspend(struct device *dev)
drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
- if (drm_dev->mode_config.num_crtc)
- nouveau_fbcon_set_suspend(drm_dev, 1);
-
ret = nouveau_do_suspend(drm_dev, false);
if (ret)
return ret;
pci_save_state(pdev);
pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
static int
-nouveau_do_resume(struct drm_device *dev)
+nouveau_do_resume(struct drm_device *dev, bool runtime)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
@@ -660,7 +660,9 @@ nouveau_do_resume(struct drm_device *dev)
if (dev->mode_config.num_crtc) {
NV_INFO(drm, "resuming display...\n");
- nouveau_display_repin(dev);
+ nouveau_display_resume(dev, runtime);
+ NV_INFO(drm, "resuming console...\n");
+ nouveau_fbcon_set_suspend(dev, 0);
}
return 0;
@@ -683,47 +685,21 @@ int nouveau_pmops_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- ret = nouveau_do_resume(drm_dev);
- if (ret)
- return ret;
-
- if (drm_dev->mode_config.num_crtc) {
- nouveau_display_resume(drm_dev);
- nouveau_fbcon_set_suspend(drm_dev, 0);
- }
-
- return 0;
+ return nouveau_do_resume(drm_dev, false);
}
static int nouveau_pmops_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- int ret;
-
- if (drm_dev->mode_config.num_crtc)
- nouveau_fbcon_set_suspend(drm_dev, 1);
-
- ret = nouveau_do_suspend(drm_dev, false);
- return ret;
+ return nouveau_do_suspend(drm_dev, false);
}
static int nouveau_pmops_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- int ret;
-
- ret = nouveau_do_resume(drm_dev);
- if (ret)
- return ret;
-
- if (drm_dev->mode_config.num_crtc) {
- nouveau_display_resume(drm_dev);
- nouveau_fbcon_set_suspend(drm_dev, 0);
- }
-
- return 0;
+ return nouveau_do_resume(drm_dev, false);
}
@@ -979,7 +955,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- ret = nouveau_do_resume(drm_dev);
+ ret = nouveau_do_resume(drm_dev, true);
drm_kms_helper_poll_enable(drm_dev);
/* do magic */
nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index dc1753c368e3..593ef8a2a069 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -487,6 +487,16 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.fb_probe = nouveau_fbcon_create,
};
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+ struct nouveau_fbdev *fbcon = container_of(work, typeof(*fbcon), work);
+ console_lock();
+ nouveau_fbcon_accel_restore(fbcon->dev);
+ nouveau_fbcon_zfill(fbcon->dev, fbcon);
+ fb_set_suspend(fbcon->helper.fbdev, FBINFO_STATE_RUNNING);
+ console_unlock();
+}
int
nouveau_fbcon_init(struct drm_device *dev)
@@ -504,6 +514,7 @@ nouveau_fbcon_init(struct drm_device *dev)
if (!fbcon)
return -ENOMEM;
+ INIT_WORK(&fbcon->work, nouveau_fbcon_set_suspend_work);
fbcon->dev = dev;
drm->fbcon = fbcon;
@@ -552,14 +563,14 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (drm->fbcon) {
- console_lock();
- if (state == 0) {
- nouveau_fbcon_accel_restore(dev);
- nouveau_fbcon_zfill(dev, drm->fbcon);
+ if (state == FBINFO_STATE_RUNNING) {
+ schedule_work(&drm->fbcon->work);
+ return;
}
+ flush_work(&drm->fbcon->work);
+ console_lock();
fb_set_suspend(drm->fbcon->helper.fbdev, state);
- if (state == 1)
- nouveau_fbcon_accel_save_disable(dev);
+ nouveau_fbcon_accel_save_disable(dev);
console_unlock();
}
}
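
The fbcon change above defers the resume half of set_suspend to a work item and makes the suspend half flush any pending resume before suspending the framebuffer. A minimal single-threaded model of that ordering (names and the printed steps are illustrative, with the work item reduced to a pending flag):

#include <stdio.h>
#include <stdbool.h>

static bool resume_pending;

static void resume_work(void)
{
        printf("restore accel, zfill, fb_set_suspend(RUNNING)\n");
}

static void flush_pending_resume(void)
{
        if (resume_pending) {
                resume_work();
                resume_pending = false;
        }
}

static void set_suspend(int state)
{
        if (state == 0) {               /* FBINFO_STATE_RUNNING */
                resume_pending = true;  /* schedule_work() in the driver */
                return;
        }
        flush_pending_resume();         /* don't suspend under a pending resume */
        printf("fb_set_suspend(SUSPENDED), save+disable accel\n");
}

int main(void)
{
        set_suspend(0); /* resume: only queues work */
        set_suspend(1); /* suspend: flushes the queued resume first */
        return 0;
}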
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 1e2e9e27a03b..6208e70e4a1c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -36,6 +36,7 @@ struct nouveau_fbdev {
struct nouveau_framebuffer nouveau_fb;
struct list_head fbdev_list;
struct drm_device *dev;
+ struct work_struct work;
unsigned int saved_flags;
struct nvif_object surf2d;
struct nvif_object clip;
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index 47ca88623753..6544b84f0303 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -40,12 +40,12 @@
#include "nouveau_usif.h"
static void
-nvkm_client_unmap(void *priv, void *ptr, u32 size)
+nvkm_client_unmap(void *priv, void __iomem *ptr, u32 size)
{
iounmap(ptr);
}
-static void *
+static void __iomem *
nvkm_client_map(void *priv, u64 handle, u32 size)
{
return ioremap(handle, size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 18d55d447248..c7592ec8ecb8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -108,7 +108,16 @@ void
nouveau_vga_fini(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
+ bool runtime = false;
+
+ if (nouveau_runtime_pm == 1)
+ runtime = true;
+ if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
+ runtime = true;
+
vga_switcheroo_unregister_client(dev->pdev);
+ if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
+ vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.h b/drivers/gpu/drm/nouveau/nvif/driver.h
index b72a8f0c2758..ac4bdb3ea506 100644
--- a/drivers/gpu/drm/nouveau/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/nvif/driver.h
@@ -9,8 +9,8 @@ struct nvif_driver {
int (*suspend)(void *priv);
int (*resume)(void *priv);
int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack);
- void *(*map)(void *priv, u64 handle, u32 size);
- void (*unmap)(void *priv, void *ptr, u32 size);
+ void __iomem *(*map)(void *priv, u64 handle, u32 size);
+ void (*unmap)(void *priv, void __iomem *ptr, u32 size);
bool keep;
};
diff --git a/drivers/gpu/drm/nouveau/nvif/object.h b/drivers/gpu/drm/nouveau/nvif/object.h
index fac3a3bbec44..fe519179b76c 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/nvif/object.h
@@ -14,7 +14,7 @@ struct nvif_object {
void *priv; /*XXX: hack */
void (*dtor)(struct nvif_object *);
struct {
- void *ptr;
+ void __iomem *ptr;
u32 size;
} map;
};
@@ -42,7 +42,7 @@ void nvif_object_unmap(struct nvif_object *);
struct nvif_object *_object = nvif_object(a); \
u32 _data; \
if (likely(_object->map.ptr)) \
- _data = ioread##b##_native((u8 *)_object->map.ptr + (c)); \
+ _data = ioread##b##_native((u8 __iomem *)_object->map.ptr + (c)); \
else \
_data = nvif_object_rd(_object, (b) / 8, (c)); \
_data; \
@@ -50,7 +50,7 @@ void nvif_object_unmap(struct nvif_object *);
#define nvif_wr(a,b,c,d) ({ \
struct nvif_object *_object = nvif_object(a); \
if (likely(_object->map.ptr)) \
- iowrite##b##_native((d), (u8 *)_object->map.ptr + (c)); \
+ iowrite##b##_native((d), (u8 __iomem *)_object->map.ptr + (c)); \
else \
nvif_object_wr(_object, (b) / 8, (c), (d)); \
})
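
The nvif changes are sparse annotations only: tagging the mapping as void __iomem * does not change generated code, but lets sparse flag dereferences and mixing with ordinary pointers. A minimal, compile-only illustration of what the annotation expands to (the address-space number is the conventional one for MMIO and is an assumption here):

#ifdef __CHECKER__
#define __iomem __attribute__((noderef, address_space(2)))
#else
#define __iomem
#endif

struct nv_map {
        void __iomem *ptr;      /* only touch through ioread/iowrite helpers */
        unsigned int size;
};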
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 7d7aed5357f0..d01b87991422 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -72,7 +72,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
- radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o dce3_1_afmt.o \
+ radeon_pm.o atombios_dp.o r600_hdmi.o dce3_1_afmt.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index a7f2ddf09a9d..b8cd7975f797 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -291,29 +291,6 @@ static void radeon_atom_backlight_exit(struct radeon_encoder *encoder)
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
struct drm_display_mode *mode);
-
-static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- return true;
- default:
- return false;
- }
-}
-
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index f81d7ca134db..0b2929de9f41 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -24,6 +24,7 @@
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "btcd.h"
#include "r600_dpm.h"
#include "cypress_dpm.h"
@@ -2099,7 +2100,6 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
bool disable_mclk_switching;
u32 mclk, sclk;
u16 vddc, vddci;
- u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
btc_dpm_vblank_too_short(rdev))
@@ -2141,39 +2141,6 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
ps->low.vddci = max_limits->vddci;
}
- /* limit clocks to max supported clocks based on voltage dependency tables */
- btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
- &max_sclk_vddc);
- btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- &max_mclk_vddci);
- btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- &max_mclk_vddc);
-
- if (max_sclk_vddc) {
- if (ps->low.sclk > max_sclk_vddc)
- ps->low.sclk = max_sclk_vddc;
- if (ps->medium.sclk > max_sclk_vddc)
- ps->medium.sclk = max_sclk_vddc;
- if (ps->high.sclk > max_sclk_vddc)
- ps->high.sclk = max_sclk_vddc;
- }
- if (max_mclk_vddci) {
- if (ps->low.mclk > max_mclk_vddci)
- ps->low.mclk = max_mclk_vddci;
- if (ps->medium.mclk > max_mclk_vddci)
- ps->medium.mclk = max_mclk_vddci;
- if (ps->high.mclk > max_mclk_vddci)
- ps->high.mclk = max_mclk_vddci;
- }
- if (max_mclk_vddc) {
- if (ps->low.mclk > max_mclk_vddc)
- ps->low.mclk = max_mclk_vddc;
- if (ps->medium.mclk > max_mclk_vddc)
- ps->medium.mclk = max_mclk_vddc;
- if (ps->high.mclk > max_mclk_vddc)
- ps->high.mclk = max_mclk_vddc;
- }
-
/* XXX validate the min clocks required for display */
if (disable_mclk_switching) {
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index d416bb2ff48d..11a55e9dad7f 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "radeon_ucode.h"
#include "cikd.h"
#include "r600_dpm.h"
@@ -162,8 +163,6 @@ static const struct ci_pt_config_reg didt_config_ci[] =
};
extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
-extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
- u32 *max_clock);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
@@ -748,7 +747,6 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_clock_and_voltage_limits *max_limits;
bool disable_mclk_switching;
u32 sclk, mclk;
- u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
if (rps->vce_active) {
@@ -784,29 +782,6 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
}
}
- /* limit clocks to max supported clocks based on voltage dependency tables */
- btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
- &max_sclk_vddc);
- btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- &max_mclk_vddci);
- btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- &max_mclk_vddc);
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (max_sclk_vddc) {
- if (ps->performance_levels[i].sclk > max_sclk_vddc)
- ps->performance_levels[i].sclk = max_sclk_vddc;
- }
- if (max_mclk_vddci) {
- if (ps->performance_levels[i].mclk > max_mclk_vddci)
- ps->performance_levels[i].mclk = max_mclk_vddci;
- }
- if (max_mclk_vddc) {
- if (ps->performance_levels[i].mclk > max_mclk_vddc)
- ps->performance_levels[i].mclk = max_mclk_vddc;
- }
- }
-
/* XXX validate the min clocks required for display */
if (disable_mclk_switching) {
@@ -5293,9 +5268,13 @@ int ci_dpm_init(struct radeon_device *rdev)
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps *rps = &pi->current_rps;
u32 sclk = ci_get_average_sclk_freq(rdev);
u32 mclk = ci_get_average_mclk_freq(rdev);
+ seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
+ seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
seq_printf(m, "power level avg sclk: %u mclk: %u\n",
sclk, mclk);
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0d761f73a7fa..377afa504d2b 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3993,7 +3993,7 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
@@ -4235,7 +4235,7 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_PFP_UCODE_ADDR, le32_to_cpu(pfp_hdr->header.ucode_version));
/* CE */
fw_data = (const __le32 *)
@@ -4244,7 +4244,7 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
WREG32(CP_CE_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_CE_UCODE_ADDR, 0);
+ WREG32(CP_CE_UCODE_ADDR, le32_to_cpu(ce_hdr->header.ucode_version));
/* ME */
fw_data = (const __be32 *)
@@ -4253,7 +4253,8 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, le32_to_cpu(me_hdr->header.ucode_version));
+ WREG32(CP_ME_RAM_RADDR, le32_to_cpu(me_hdr->header.ucode_version));
} else {
const __be32 *fw_data;
@@ -4279,10 +4280,6 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
WREG32(CP_ME_RAM_WADDR, 0);
}
- WREG32(CP_PFP_UCODE_ADDR, 0);
- WREG32(CP_CE_UCODE_ADDR, 0);
- WREG32(CP_ME_RAM_WADDR, 0);
- WREG32(CP_ME_RAM_RADDR, 0);
return 0;
}
@@ -4564,7 +4561,7 @@ static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+ WREG32(CP_MEC_ME1_UCODE_ADDR, le32_to_cpu(mec_hdr->header.ucode_version));
/* MEC2 */
if (rdev->family == CHIP_KAVERI) {
@@ -4578,7 +4575,7 @@ static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ WREG32(CP_MEC_ME2_UCODE_ADDR, le32_to_cpu(mec2_hdr->header.ucode_version));
}
} else {
const __be32 *fw_data;
@@ -4690,7 +4687,7 @@ static int cik_mec_init(struct radeon_device *rdev)
r = radeon_bo_create(rdev,
rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, 0, NULL,
+ RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
&rdev->mec.hpd_eop_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -4804,7 +4801,7 @@ struct bonaire_mqd
*/
static int cik_cp_compute_resume(struct radeon_device *rdev)
{
- int r, i, idx;
+ int r, i, j, idx;
u32 tmp;
bool use_doorbell = true;
u64 hqd_gpu_addr;
@@ -4861,7 +4858,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
sizeof(struct bonaire_mqd),
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, 0, NULL,
- &rdev->ring[idx].mqd_obj);
+ NULL, &rdev->ring[idx].mqd_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
return r;
@@ -4923,7 +4920,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
mqd->queue_state.cp_hqd_pq_wptr= 0;
if (RREG32(CP_HQD_ACTIVE) & 1) {
WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
- for (i = 0; i < rdev->usec_timeout; i++) {
+ for (j = 0; j < rdev->usec_timeout; j++) {
if (!(RREG32(CP_HQD_ACTIVE) & 1))
break;
udelay(1);
@@ -6227,7 +6224,7 @@ static int cik_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_GPM_UCODE_ADDR, 0);
for (i = 0; i < size; i++)
WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(RLC_GPM_UCODE_ADDR, 0);
+ WREG32(RLC_GPM_UCODE_ADDR, le32_to_cpu(hdr->header.ucode_version));
} else {
const __be32 *fw_data;
@@ -7752,17 +7749,17 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
+ wptr &= ~RB_OVERFLOW;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 16). Hopefully
* this should allow us to catchup.
*/
- dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
- wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
- wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
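
The IH fix above clears RB_OVERFLOW before the value is used, so both the warning and the recomputed read pointer see the masked wptr (the old code also added the pointer mask instead of AND'ing with it). A standalone model of the overflow handling, using an example ring size:

#include <stdint.h>
#include <stdio.h>

#define RB_OVERFLOW   0x00000001u       /* illustrative bit position */
#define IH_PTR_MASK   0x0003ffffu       /* ring size - 1 (example) */

/* Strip the overflow flag first, then restart parsing 16 bytes past the
 * write pointer so the oldest unread vectors are skipped. */
static uint32_t get_ih_wptr(uint32_t wptr, uint32_t *rptr)
{
        if (wptr & RB_OVERFLOW) {
                wptr &= ~RB_OVERFLOW;
                printf("IH overflow (0x%08X, 0x%08X, 0x%08X)\n",
                       wptr, *rptr, (wptr + 16) & IH_PTR_MASK);
                *rptr = (wptr + 16) & IH_PTR_MASK;
        }
        return wptr & IH_PTR_MASK;
}

int main(void)
{
        uint32_t rptr = 0x100;
        uint32_t wptr = get_ih_wptr(0x0003fff1, &rptr); /* overflowed wptr */
        printf("wptr=0x%05X rptr=0x%05X\n", wptr, rptr); /* rptr wraps to 0 */
        return 0;
}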
@@ -8252,6 +8249,7 @@ restart_ih:
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
+ WREG32(IH_RB_RPTR, rptr);
}
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
@@ -8262,7 +8260,6 @@ restart_ih:
if (queue_thermal)
schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
- WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
/* make sure wptr hasn't changed while processing */
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index c01a6100c318..4e8432d07f15 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -489,13 +489,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
{
int r;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
- RREG32(SRBM_SOFT_RESET);
-
r = cik_sdma_load_microcode(rdev);
if (r)
return r;
@@ -571,7 +564,7 @@ struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
@@ -618,16 +611,19 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
{
unsigned i;
int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ unsigned index;
u32 tmp;
+ u64 gpu_addr;
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ index = R600_WB_DMA_RING_TEST_OFFSET;
+ else
+ index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+ gpu_addr = rdev->wb.gpu_addr + index;
tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
+ rdev->wb.wb[index/4] = cpu_to_le32(tmp);
r = radeon_ring_lock(rdev, ring, 5);
if (r) {
@@ -635,14 +631,14 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
return r;
}
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
- radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
+ radeon_ring_write(ring, lower_32_bits(gpu_addr));
+ radeon_ring_write(ring, upper_32_bits(gpu_addr));
radeon_ring_write(ring, 1); /* number of DWs to follow */
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
+ tmp = le32_to_cpu(rdev->wb.wb[index/4]);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
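
The ring test now uses a GPU-writeback slot instead of the VRAM scratch pointer: the CPU seeds the slot with 0xCAFEDEAD, queues an SDMA write of 0xDEADBEEF to the slot's GPU address, and polls until it lands. A minimal simulation of that handshake (the GPU side is faked with a direct store):

#include <stdint.h>
#include <stdio.h>

/* Simulated writeback page shared between CPU and GPU (one u32 per slot). */
static uint32_t wb[256];

/* Stand-in for the DMA engine executing the queued write packet. */
static void gpu_execute_write(unsigned index, uint32_t value)
{
        wb[index / 4] = value;
}

int main(void)
{
        unsigned index = 64;            /* e.g. a DMA ring-test offset */
        unsigned i, timeout = 1000;

        wb[index / 4] = 0xCAFEDEAD;     /* sentinel the GPU must overwrite */
        gpu_execute_write(index, 0xDEADBEEF);

        for (i = 0; i < timeout; i++)
                if (wb[index / 4] == 0xDEADBEEF)
                        break;

        printf(i < timeout ? "ring test passed\n" : "ring test failed\n");
        return 0;
}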
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 47d31e915758..9aad0327e4d1 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -24,6 +24,7 @@
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "evergreend.h"
#include "r600_dpm.h"
#include "cypress_dpm.h"
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index 51800e340a57..2fe8cfc966d9 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -32,7 +32,7 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
struct drm_connector *connector;
struct radeon_connector *radeon_connector = NULL;
u32 tmp;
- u8 *sadb;
+ u8 *sadb = NULL;
int sad_count;
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
@@ -49,8 +49,8 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
if (sad_count < 0) {
- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- return;
+ DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ sad_count = 0;
}
/* program the speaker allocation */
@@ -165,7 +165,7 @@ void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *m
/* disable audio prior to setting up hw */
dig->afmt->pin = r600_audio_get_pin(rdev);
- r600_audio_enable(rdev, dig->afmt->pin, false);
+ r600_audio_enable(rdev, dig->afmt->pin, 0);
r600_audio_set_dto(encoder, mode->clock);
@@ -240,5 +240,5 @@ void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *m
r600_hdmi_audio_workaround(encoder);
/* enable audio after to setting up hw */
- r600_audio_enable(rdev, dig->afmt->pin, true);
+ r600_audio_enable(rdev, dig->afmt->pin, 0xf);
}
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index ab29f953a767..f312edf4d50e 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -155,7 +155,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
struct drm_connector *connector;
struct radeon_connector *radeon_connector = NULL;
u32 offset, tmp;
- u8 *sadb;
+ u8 *sadb = NULL;
int sad_count;
if (!dig || !dig->afmt || !dig->afmt->pin)
@@ -176,9 +176,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
- if (sad_count <= 0) {
- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- return;
+ if (sad_count < 0) {
+ DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ sad_count = 0;
}
/* program the speaker allocation */
@@ -284,13 +284,13 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev)
void dce6_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
- bool enable)
+ u8 enable_mask)
{
if (!pin)
return;
- WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
- enable ? AUDIO_ENABLED : 0);
+ WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ enable_mask ? AUDIO_ENABLED : 0);
}
static const u32 pin_offsets[7] =
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index dbca60c7d097..a31f1ca40c6a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -22,7 +22,6 @@
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
@@ -4023,7 +4022,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
if (rdev->rlc.save_restore_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
- &rdev->rlc.save_restore_obj);
+ NULL, &rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
@@ -4102,7 +4101,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
if (rdev->rlc.clear_state_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
- &rdev->rlc.clear_state_obj);
+ NULL, &rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
sumo_rlc_fini(rdev);
@@ -4179,7 +4178,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
- &rdev->rlc.cp_table_obj);
+ NULL, &rdev->rlc.cp_table_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
sumo_rlc_fini(rdev);
@@ -4749,17 +4748,17 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
+ wptr &= ~RB_OVERFLOW;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 16). Hopefully
* this should allow us to catchup.
*/
- dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
- wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
- wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -5137,6 +5136,7 @@ restart_ih:
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
+ WREG32(IH_RB_RPTR, rptr);
}
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
@@ -5145,7 +5145,6 @@ restart_ih:
if (queue_thermal && rdev->pm.dpm_enabled)
schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
- WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
/* make sure wptr hasn't changed while processing */
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 946f37d0b469..66bcfadeedd1 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -133,7 +133,7 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 278c7a139d74..53abd9b17a50 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -38,6 +38,37 @@ extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode);
+/* enable the audio stream */
+static void dce4_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ u8 enable_mask)
+{
+ u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL);
+
+ if (!pin)
+ return;
+
+ if (enable_mask) {
+ tmp |= AUDIO_ENABLED;
+ if (enable_mask & 1)
+ tmp |= PIN0_AUDIO_ENABLED;
+ if (enable_mask & 2)
+ tmp |= PIN1_AUDIO_ENABLED;
+ if (enable_mask & 4)
+ tmp |= PIN2_AUDIO_ENABLED;
+ if (enable_mask & 8)
+ tmp |= PIN3_AUDIO_ENABLED;
+ } else {
+ tmp &= ~(AUDIO_ENABLED |
+ PIN0_AUDIO_ENABLED |
+ PIN1_AUDIO_ENABLED |
+ PIN2_AUDIO_ENABLED |
+ PIN3_AUDIO_ENABLED);
+ }
+
+ WREG32(AZ_HOT_PLUG_CONTROL, tmp);
+}
+
/*
* update the N and CTS parameters for a given pixel clock rate
*/
@@ -102,7 +133,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
struct drm_connector *connector;
struct radeon_connector *radeon_connector = NULL;
u32 tmp;
- u8 *sadb;
+ u8 *sadb = NULL;
int sad_count;
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
@@ -118,9 +149,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
- if (sad_count <= 0) {
- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- return;
+ if (sad_count < 0) {
+ DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ sad_count = 0;
}
/* program the speaker allocation */
@@ -318,10 +349,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
/* disable audio prior to setting up hw */
if (ASIC_IS_DCE6(rdev)) {
dig->afmt->pin = dce6_audio_get_pin(rdev);
- dce6_audio_enable(rdev, dig->afmt->pin, false);
+ dce6_audio_enable(rdev, dig->afmt->pin, 0);
} else {
dig->afmt->pin = r600_audio_get_pin(rdev);
- r600_audio_enable(rdev, dig->afmt->pin, false);
+ dce4_audio_enable(rdev, dig->afmt->pin, 0);
}
evergreen_audio_set_dto(encoder, mode->clock);
@@ -463,13 +494,15 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
/* enable audio after to setting up hw */
if (ASIC_IS_DCE6(rdev))
- dce6_audio_enable(rdev, dig->afmt->pin, true);
+ dce6_audio_enable(rdev, dig->afmt->pin, 1);
else
- r600_audio_enable(rdev, dig->afmt->pin, true);
+ dce4_audio_enable(rdev, dig->afmt->pin, 0xf);
}
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -482,6 +515,14 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
+ if (!enable && dig->afmt->pin) {
+ if (ASIC_IS_DCE6(rdev))
+ dce6_audio_enable(rdev, dig->afmt->pin, 0);
+ else
+ dce4_audio_enable(rdev, dig->afmt->pin, 0);
+ dig->afmt->pin = NULL;
+ }
+
dig->afmt->enabled = enable;
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
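
dce4_audio_enable takes a per-pin mask instead of a bool, so individual pins can be enabled while others stay off, and disable clears all of them at once. A sketch of the bit assembly, mirroring the hunk's structure (the bit names appear in the patch; the positions used here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define AUDIO_ENABLED           (1u << 31)      /* illustrative positions */
#define PIN0_AUDIO_ENABLED      (1u << 24)
#define PIN1_AUDIO_ENABLED      (1u << 25)
#define PIN2_AUDIO_ENABLED      (1u << 26)
#define PIN3_AUDIO_ENABLED      (1u << 27)

static uint32_t dce4_audio_bits(uint32_t tmp, uint8_t enable_mask)
{
        if (enable_mask) {
                tmp |= AUDIO_ENABLED;
                if (enable_mask & 1) tmp |= PIN0_AUDIO_ENABLED;
                if (enable_mask & 2) tmp |= PIN1_AUDIO_ENABLED;
                if (enable_mask & 4) tmp |= PIN2_AUDIO_ENABLED;
                if (enable_mask & 8) tmp |= PIN3_AUDIO_ENABLED;
        } else {
                tmp &= ~(AUDIO_ENABLED | PIN0_AUDIO_ENABLED |
                         PIN1_AUDIO_ENABLED | PIN2_AUDIO_ENABLED |
                         PIN3_AUDIO_ENABLED);
        }
        return tmp;
}

int main(void)
{
        printf("0x%08x\n", dce4_audio_bits(0, 0xf));    /* all pins on */
        printf("0x%08x\n", dce4_audio_bits(~0u, 0));    /* everything off */
        return 0;
}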
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 8b58e11b64fa..1dd976f447fa 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -33,6 +33,8 @@
#define KV_MINIMUM_ENGINE_CLOCK 800
#define SMC_RAM_END 0x40000
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+ bool enable);
static void kv_init_graphics_levels(struct radeon_device *rdev);
static int kv_calculate_ds_divider(struct radeon_device *rdev);
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
@@ -1295,6 +1297,9 @@ void kv_dpm_disable(struct radeon_device *rdev)
{
kv_smc_bapm_enable(rdev, false);
+ if (rdev->family == CHIP_MULLINS)
+ kv_enable_nb_dpm(rdev, false);
+
/* powerup blocks */
kv_dpm_powergate_acp(rdev, false);
kv_dpm_powergate_samu(rdev, false);
@@ -1769,15 +1774,24 @@ static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
return ret;
}
-static int kv_enable_nb_dpm(struct radeon_device *rdev)
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+ bool enable)
{
struct kv_power_info *pi = kv_get_pi(rdev);
int ret = 0;
- if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
- ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
- if (ret == 0)
- pi->nb_dpm_enabled = true;
+ if (enable) {
+ if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
+ ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
+ if (ret == 0)
+ pi->nb_dpm_enabled = true;
+ }
+ } else {
+ if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
+ ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
+ if (ret == 0)
+ pi->nb_dpm_enabled = false;
+ }
}
return ret;
@@ -1864,7 +1878,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
}
kv_update_sclk_t(rdev);
if (rdev->family == CHIP_MULLINS)
- kv_enable_nb_dpm(rdev);
+ kv_enable_nb_dpm(rdev, true);
}
} else {
if (pi->enable_dpm) {
@@ -1889,7 +1903,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
}
kv_update_acp_boot_level(rdev);
kv_update_sclk_t(rdev);
- kv_enable_nb_dpm(rdev);
+ kv_enable_nb_dpm(rdev, true);
}
}
@@ -2773,6 +2787,8 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
+ seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
+ seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
seq_printf(m, "power level %d sclk: %u vddc: %u\n",
current_index, sclk, vddc);
}
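kv_enable_nb_dpm() now takes an enable flag and only messages the SMU when the cached state actually changes, which lets kv_dpm_disable() turn NB DPM off on Mullins without issuing redundant firmware calls. A compact model of that edge-triggered pattern; the SMU call is stubbed and the names are illustrative:

#include <stdbool.h>
#include <stdio.h>

enum smu_msg { NBDPM_ENABLE, NBDPM_DISABLE };

struct kv_pi { bool enable_nb_dpm; bool nb_dpm_enabled; };

static int notify_smu(enum smu_msg msg)
{
    printf("SMU message %d\n", (int)msg);
    return 0;
}

static int enable_nb_dpm(struct kv_pi *pi, bool enable)
{
    int ret = 0;

    if (!pi->enable_nb_dpm)
        return 0;                          /* feature not supported/allowed */

    if (enable && !pi->nb_dpm_enabled) {   /* only on the off -> on edge */
        ret = notify_smu(NBDPM_ENABLE);
        if (ret == 0)
            pi->nb_dpm_enabled = true;
    } else if (!enable && pi->nb_dpm_enabled) { /* only on the on -> off edge */
        ret = notify_smu(NBDPM_DISABLE);
        if (ret == 0)
            pi->nb_dpm_enabled = false;
    }
    return ret;
}

int main(void)
{
    struct kv_pi pi = { .enable_nb_dpm = true, .nb_dpm_enabled = false };

    enable_nb_dpm(&pi, true);   /* sends NBDPM_ENABLE */
    enable_nb_dpm(&pi, true);   /* no-op, already enabled */
    enable_nb_dpm(&pi, false);  /* sends NBDPM_DISABLE */
    return 0;
}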
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 8a3e6221cece..f26f0a9fb522 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -191,12 +191,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
u32 reg_offset, wb_offset;
int i, r;
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
-
for (i = 0; i < 2; i++) {
if (i == 0) {
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 01fc4888e6fe..6d2f16cf2c1c 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -23,6 +23,7 @@
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "nid.h"
#include "r600_dpm.h"
#include "ni_dpm.h"
@@ -789,7 +790,6 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
bool disable_mclk_switching;
u32 mclk;
u16 vddci;
- u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -816,29 +816,6 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
}
}
- /* limit clocks to max supported clocks based on voltage dependency tables */
- btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
- &max_sclk_vddc);
- btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- &max_mclk_vddci);
- btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- &max_mclk_vddc);
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (max_sclk_vddc) {
- if (ps->performance_levels[i].sclk > max_sclk_vddc)
- ps->performance_levels[i].sclk = max_sclk_vddc;
- }
- if (max_mclk_vddci) {
- if (ps->performance_levels[i].mclk > max_mclk_vddci)
- ps->performance_levels[i].mclk = max_mclk_vddci;
- }
- if (max_mclk_vddc) {
- if (ps->performance_levels[i].mclk > max_mclk_vddc)
- ps->performance_levels[i].mclk = max_mclk_vddc;
- }
- }
-
/* XXX validate the min clocks required for display */
/* adjust low state */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c6b486f888d5..10f8be0ee173 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -821,6 +821,20 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
return RREG32(RADEON_CRTC2_CRNT_FRAME);
}
+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * rdev: radeon device structure
+ * ring: ring buffer struct for emitting packets
+ */
+static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
+
/* Who ever call radeon_fence_emit should call ring_lock and ask
* for enough space (today caller are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
@@ -1059,20 +1073,6 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
(void)RREG32(RADEON_CP_RB_WPTR);
}
-/**
- * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
- * rdev: radeon device structure
- * ring: ring buffer struct for emitting packets
- */
-void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
- RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
-}
-
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 25f367ac4637..56b02927cd3d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1430,7 +1430,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- 0, NULL, &rdev->vram_scratch.robj);
+ 0, NULL, NULL, &rdev->vram_scratch.robj);
if (r) {
return r;
}
@@ -2912,7 +2912,7 @@ struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
@@ -3368,7 +3368,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, 0,
- NULL, &rdev->ih.ring_obj);
+ NULL, NULL, &rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
return r;
@@ -3925,17 +3925,17 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
+ wptr &= ~RB_OVERFLOW;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 16). Hopefully
* this should allow us to catchup.
*/
- dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
- wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
- wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -4181,6 +4181,7 @@ restart_ih:
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
+ WREG32(IH_RB_RPTR, rptr);
}
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
@@ -4189,7 +4190,6 @@ restart_ih:
if (queue_thermal && rdev->pm.dpm_enabled)
schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
- WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
/* make sure wptr hasn't changed while processing */
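The r600 interrupt changes above fix two related details: the RB_OVERFLOW flag is stripped from wptr before any arithmetic uses it (the old warning even printed "(wptr + 16) + ptr_mask" instead of masking), and IH_RB_RPTR is written back as each entry is consumed instead of once at the end, so the hardware sees ring space freeing up while an interrupt storm is being drained. A small model of the wptr/rptr arithmetic; the bit position and ring mask are illustrative stand-ins:

#include <stdint.h>
#include <stdio.h>

#define RB_OVERFLOW  (1u << 0)     /* illustrative flag bit */
#define PTR_MASK     0x3fffu       /* ring size - 1, illustrative */

/* returns the wptr to process up to, bumping *rptr past lost entries on overflow */
static uint32_t get_ih_wptr(uint32_t hw_wptr, uint32_t *rptr)
{
    uint32_t wptr = hw_wptr;

    if (wptr & RB_OVERFLOW) {
        wptr &= ~RB_OVERFLOW;               /* strip the flag before any math */
        *rptr = (wptr + 16) & PTR_MASK;     /* resume 16 bytes past the overwrite */
    }
    return wptr & PTR_MASK;
}

int main(void)
{
    uint32_t rptr = 0x100;
    uint32_t wptr = get_ih_wptr(0x2040 | RB_OVERFLOW, &rptr);

    printf("wptr=0x%04x rptr=0x%04x\n", wptr, rptr);
    return 0;
}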
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
deleted file mode 100644
index bffac10c4296..000000000000
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Copyright 2008 Advanced Micro Devices, Inc.
- * Copyright 2008 Red Hat Inc.
- * Copyright 2009 Christian König.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Christian König
- */
-#include <drm/drmP.h>
-#include "radeon.h"
-#include "radeon_reg.h"
-#include "radeon_asic.h"
-#include "atom.h"
-
-/*
- * check if enc_priv stores radeon_encoder_atom_dig
- */
-static bool radeon_dig_encoder(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- return true;
- }
- return false;
-}
-
-/*
- * check if the chipset is supported
- */
-static int r600_audio_chipset_supported(struct radeon_device *rdev)
-{
- return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
-}
-
-struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
-{
- struct r600_audio_pin status;
- uint32_t value;
-
- value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
-
- /* number of channels */
- status.channels = (value & 0x7) + 1;
-
- /* bits per sample */
- switch ((value & 0xF0) >> 4) {
- case 0x0:
- status.bits_per_sample = 8;
- break;
- case 0x1:
- status.bits_per_sample = 16;
- break;
- case 0x2:
- status.bits_per_sample = 20;
- break;
- case 0x3:
- status.bits_per_sample = 24;
- break;
- case 0x4:
- status.bits_per_sample = 32;
- break;
- default:
- dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
- (int)value);
- status.bits_per_sample = 16;
- }
-
- /* current sampling rate in HZ */
- if (value & 0x4000)
- status.rate = 44100;
- else
- status.rate = 48000;
- status.rate *= ((value >> 11) & 0x7) + 1;
- status.rate /= ((value >> 8) & 0x7) + 1;
-
- value = RREG32(R600_AUDIO_STATUS_BITS);
-
- /* iec 60958 status bits */
- status.status_bits = value & 0xff;
-
- /* iec 60958 category code */
- status.category_code = (value >> 8) & 0xff;
-
- return status;
-}
-
-/*
- * update all hdmi interfaces with current audio parameters
- */
-void r600_audio_update_hdmi(struct work_struct *work)
-{
- struct radeon_device *rdev = container_of(work, struct radeon_device,
- audio_work);
- struct drm_device *dev = rdev->ddev;
- struct r600_audio_pin audio_status = r600_audio_status(rdev);
- struct drm_encoder *encoder;
- bool changed = false;
-
- if (rdev->audio.pin[0].channels != audio_status.channels ||
- rdev->audio.pin[0].rate != audio_status.rate ||
- rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
- rdev->audio.pin[0].status_bits != audio_status.status_bits ||
- rdev->audio.pin[0].category_code != audio_status.category_code) {
- rdev->audio.pin[0] = audio_status;
- changed = true;
- }
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (!radeon_dig_encoder(encoder))
- continue;
- if (changed || r600_hdmi_buffer_status_changed(encoder))
- r600_hdmi_update_audio_settings(encoder);
- }
-}
-
-/* enable the audio stream */
-void r600_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- bool enable)
-{
- u32 value = 0;
-
- if (!pin)
- return;
-
- if (ASIC_IS_DCE4(rdev)) {
- if (enable) {
- value |= 0x81000000; /* Required to enable audio */
- value |= 0x0e1000f0; /* fglrx sets that too */
- }
- WREG32(EVERGREEN_AUDIO_ENABLE, value);
- } else {
- WREG32_P(R600_AUDIO_ENABLE,
- enable ? 0x81000000 : 0x0, ~0x81000000);
- }
-}
-
-/*
- * initialize the audio vars
- */
-int r600_audio_init(struct radeon_device *rdev)
-{
- if (!radeon_audio || !r600_audio_chipset_supported(rdev))
- return 0;
-
- rdev->audio.enabled = true;
-
- rdev->audio.num_pins = 1;
- rdev->audio.pin[0].channels = -1;
- rdev->audio.pin[0].rate = -1;
- rdev->audio.pin[0].bits_per_sample = -1;
- rdev->audio.pin[0].status_bits = 0;
- rdev->audio.pin[0].category_code = 0;
- rdev->audio.pin[0].id = 0;
- /* disable audio. it will be set up later */
- r600_audio_enable(rdev, &rdev->audio.pin[0], false);
-
- return 0;
-}
-
-/*
- * release the audio timer
- * TODO: How to do this correctly on SMP systems?
- */
-void r600_audio_fini(struct radeon_device *rdev)
-{
- if (!rdev->audio.enabled)
- return;
-
- r600_audio_enable(rdev, &rdev->audio.pin[0], false);
-
- rdev->audio.enabled = false;
-}
-
-struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
-{
- /* only one pin on 6xx-NI */
- return &rdev->audio.pin[0];
-}
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index fc54224ce87b..aabc343b9a8f 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -124,15 +124,6 @@ int r600_dma_resume(struct radeon_device *rdev)
u32 rb_bufsz;
int r;
- /* Reset dma */
- if (rdev->family >= CHIP_RV770)
- WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
- else
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
-
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
@@ -241,16 +232,19 @@ int r600_dma_ring_test(struct radeon_device *rdev,
{
unsigned i;
int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ unsigned index;
u32 tmp;
+ u64 gpu_addr;
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ index = R600_WB_DMA_RING_TEST_OFFSET;
+ else
+ index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+ gpu_addr = rdev->wb.gpu_addr + index;
tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
+ rdev->wb.wb[index/4] = cpu_to_le32(tmp);
r = radeon_ring_lock(rdev, ring, 4);
if (r) {
@@ -258,13 +252,13 @@ int r600_dma_ring_test(struct radeon_device *rdev,
return r;
}
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
- radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+ radeon_ring_write(ring, lower_32_bits(gpu_addr));
+ radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
+ tmp = le32_to_cpu(rdev->wb.wb[index/4]);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
@@ -470,7 +464,7 @@ struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
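The DMA ring test above stops poking the VRAM scratch page through a CPU mapping; instead it poisons a slot in the GPU write-back page at a per-ring byte offset and lets the DMA ring overwrite it, exercising the same memory path the rings already use for rptr updates. A rough model of the offset selection and poll, with the write-back array and DMA packet stubbed (the offsets are the values added in this patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define R600_WB_DMA_RING_TEST_OFFSET    3588  /* byte offset into the WB page */
#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592

static uint32_t wb[4096 / 4];   /* stand-in for rdev->wb.wb */

/* stand-in for the DMA_PACKET_WRITE that stores 0xDEADBEEF at gpu_addr */
static void dma_write(unsigned byte_offset, uint32_t val)
{
    wb[byte_offset / 4] = val;
}

static int dma_ring_test(int second_ring)
{
    unsigned index = second_ring ? CAYMAN_WB_DMA1_RING_TEST_OFFSET
                                 : R600_WB_DMA_RING_TEST_OFFSET;

    wb[index / 4] = 0xCAFEDEAD;     /* poison the slot first */
    dma_write(index, 0xDEADBEEF);   /* the ring overwrites it if alive */

    return wb[index / 4] == 0xDEADBEEF ? 0 : -1;
}

int main(void)
{
    memset(wb, 0, sizeof(wb));
    printf("ring test %s\n", dma_ring_test(0) == 0 ? "passed" : "failed");
    return 0;
}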
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 9c61b74ef441..f6309bd23e01 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -24,6 +24,7 @@
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 26ef8ced6f89..b90dc0eb08e6 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -72,6 +72,169 @@ static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
/*
+ * check if the chipset is supported
+ */
+static int r600_audio_chipset_supported(struct radeon_device *rdev)
+{
+ return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
+}
+
+static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
+{
+ struct r600_audio_pin status;
+ uint32_t value;
+
+ value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
+
+ /* number of channels */
+ status.channels = (value & 0x7) + 1;
+
+ /* bits per sample */
+ switch ((value & 0xF0) >> 4) {
+ case 0x0:
+ status.bits_per_sample = 8;
+ break;
+ case 0x1:
+ status.bits_per_sample = 16;
+ break;
+ case 0x2:
+ status.bits_per_sample = 20;
+ break;
+ case 0x3:
+ status.bits_per_sample = 24;
+ break;
+ case 0x4:
+ status.bits_per_sample = 32;
+ break;
+ default:
+ dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
+ (int)value);
+ status.bits_per_sample = 16;
+ }
+
+ /* current sampling rate in HZ */
+ if (value & 0x4000)
+ status.rate = 44100;
+ else
+ status.rate = 48000;
+ status.rate *= ((value >> 11) & 0x7) + 1;
+ status.rate /= ((value >> 8) & 0x7) + 1;
+
+ value = RREG32(R600_AUDIO_STATUS_BITS);
+
+ /* iec 60958 status bits */
+ status.status_bits = value & 0xff;
+
+ /* iec 60958 category code */
+ status.category_code = (value >> 8) & 0xff;
+
+ return status;
+}
+
+/*
+ * update all hdmi interfaces with current audio parameters
+ */
+void r600_audio_update_hdmi(struct work_struct *work)
+{
+ struct radeon_device *rdev = container_of(work, struct radeon_device,
+ audio_work);
+ struct drm_device *dev = rdev->ddev;
+ struct r600_audio_pin audio_status = r600_audio_status(rdev);
+ struct drm_encoder *encoder;
+ bool changed = false;
+
+ if (rdev->audio.pin[0].channels != audio_status.channels ||
+ rdev->audio.pin[0].rate != audio_status.rate ||
+ rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
+ rdev->audio.pin[0].status_bits != audio_status.status_bits ||
+ rdev->audio.pin[0].category_code != audio_status.category_code) {
+ rdev->audio.pin[0] = audio_status;
+ changed = true;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (!radeon_encoder_is_digital(encoder))
+ continue;
+ if (changed || r600_hdmi_buffer_status_changed(encoder))
+ r600_hdmi_update_audio_settings(encoder);
+ }
+}
+
+/* enable the audio stream */
+void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ u8 enable_mask)
+{
+ u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL);
+
+ if (!pin)
+ return;
+
+ if (enable_mask) {
+ tmp |= AUDIO_ENABLED;
+ if (enable_mask & 1)
+ tmp |= PIN0_AUDIO_ENABLED;
+ if (enable_mask & 2)
+ tmp |= PIN1_AUDIO_ENABLED;
+ if (enable_mask & 4)
+ tmp |= PIN2_AUDIO_ENABLED;
+ if (enable_mask & 8)
+ tmp |= PIN3_AUDIO_ENABLED;
+ } else {
+ tmp &= ~(AUDIO_ENABLED |
+ PIN0_AUDIO_ENABLED |
+ PIN1_AUDIO_ENABLED |
+ PIN2_AUDIO_ENABLED |
+ PIN3_AUDIO_ENABLED);
+ }
+
+ WREG32(AZ_HOT_PLUG_CONTROL, tmp);
+}
+
+/*
+ * initialize the audio vars
+ */
+int r600_audio_init(struct radeon_device *rdev)
+{
+ if (!radeon_audio || !r600_audio_chipset_supported(rdev))
+ return 0;
+
+ rdev->audio.enabled = true;
+
+ rdev->audio.num_pins = 1;
+ rdev->audio.pin[0].channels = -1;
+ rdev->audio.pin[0].rate = -1;
+ rdev->audio.pin[0].bits_per_sample = -1;
+ rdev->audio.pin[0].status_bits = 0;
+ rdev->audio.pin[0].category_code = 0;
+ rdev->audio.pin[0].id = 0;
+ /* disable audio. it will be set up later */
+ r600_audio_enable(rdev, &rdev->audio.pin[0], 0);
+
+ return 0;
+}
+
+/*
+ * release the audio timer
+ * TODO: How to do this correctly on SMP systems?
+ */
+void r600_audio_fini(struct radeon_device *rdev)
+{
+ if (!rdev->audio.enabled)
+ return;
+
+ r600_audio_enable(rdev, &rdev->audio.pin[0], 0);
+
+ rdev->audio.enabled = false;
+}
+
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
+{
+ /* only one pin on 6xx-NI */
+ return &rdev->audio.pin[0];
+}
+
+/*
* calculate CTS and N values if they are not found in the table
*/
static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
@@ -357,7 +520,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
/* disable audio prior to setting up hw */
dig->afmt->pin = r600_audio_get_pin(rdev);
- r600_audio_enable(rdev, dig->afmt->pin, false);
+ r600_audio_enable(rdev, dig->afmt->pin, 0xf);
r600_audio_set_dto(encoder, mode->clock);
@@ -443,7 +606,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
/* enable audio after to setting up hw */
- r600_audio_enable(rdev, dig->afmt->pin, true);
+ r600_audio_enable(rdev, dig->afmt->pin, 0xf);
}
/**
@@ -528,6 +691,11 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
+ if (!enable && dig->afmt->pin) {
+ r600_audio_enable(rdev, dig->afmt->pin, 0);
+ dig->afmt->pin = NULL;
+ }
+
/* Older chipsets require setting HDMI and routing manually */
if (!ASIC_IS_DCE3(rdev)) {
if (enable)
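With the audio code folded into r600_hdmi.c, r600_audio_enable() takes a u8 pin mask instead of a bool: bit N enables codec pin N, any non-zero mask also sets the global AUDIO_ENABLED bit, and a zero mask clears everything. A standalone sketch of the mask-to-register translation; the bit layout mirrors the new AZ_HOT_PLUG_CONTROL defines, and the MMIO register is replaced by a plain variable:

#include <stdint.h>
#include <stdio.h>

#define AUDIO_ENABLED      (1u << 31)
#define PIN0_AUDIO_ENABLED (1u << 24)
#define PIN1_AUDIO_ENABLED (1u << 25)
#define PIN2_AUDIO_ENABLED (1u << 26)
#define PIN3_AUDIO_ENABLED (1u << 27)

static uint32_t az_hot_plug_control;   /* stand-in for the MMIO register */

static void audio_enable(uint8_t enable_mask)
{
    uint32_t tmp = az_hot_plug_control;

    if (enable_mask) {
        tmp |= AUDIO_ENABLED;
        if (enable_mask & 1) tmp |= PIN0_AUDIO_ENABLED;
        if (enable_mask & 2) tmp |= PIN1_AUDIO_ENABLED;
        if (enable_mask & 4) tmp |= PIN2_AUDIO_ENABLED;
        if (enable_mask & 8) tmp |= PIN3_AUDIO_ENABLED;
    } else {
        tmp &= ~(AUDIO_ENABLED | PIN0_AUDIO_ENABLED |
                 PIN1_AUDIO_ENABLED | PIN2_AUDIO_ENABLED |
                 PIN3_AUDIO_ENABLED);
    }
    az_hot_plug_control = tmp;
}

int main(void)
{
    audio_enable(0xf);   /* all four pins on */
    printf("reg = 0x%08x\n", az_hot_plug_control);
    audio_enable(0);     /* everything off */
    printf("reg = 0x%08x\n", az_hot_plug_control);
    return 0;
}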
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 671b48032a3d..1e8495cca41e 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -44,13 +44,6 @@
#define R6XX_MAX_PIPES 8
#define R6XX_MAX_PIPES_MASK 0xff
-/* PTE flags */
-#define PTE_VALID (1 << 0)
-#define PTE_SYSTEM (1 << 1)
-#define PTE_SNOOPED (1 << 2)
-#define PTE_READABLE (1 << 5)
-#define PTE_WRITEABLE (1 << 6)
-
/* tiling bits */
#define ARRAY_LINEAR_GENERAL 0x00000000
#define ARRAY_LINEAR_ALIGNED 0x00000001
@@ -934,6 +927,23 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
+/* Audio */
+#define AZ_HOT_PLUG_CONTROL 0x7300
+# define AZ_FORCE_CODEC_WAKE (1 << 0)
+# define JACK_DETECTION_ENABLE (1 << 4)
+# define UNSOLICITED_RESPONSE_ENABLE (1 << 8)
+# define CODEC_HOT_PLUG_ENABLE (1 << 12)
+# define AUDIO_ENABLED (1 << 31)
+/* DCE3 adds */
+# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
+# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
+# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
+# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
+# define PIN0_AUDIO_ENABLED (1 << 24)
+# define PIN1_AUDIO_ENABLED (1 << 25)
+# define PIN2_AUDIO_ENABLED (1 << 26)
+# define PIN3_AUDIO_ENABLED (1 << 27)
+
/* Audio clocks DCE 2.0/3.0 */
#define AUDIO_DTO 0x7340
# define AUDIO_DTO_PHASE(x) (((x) & 0xffff) << 0)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ef91ebb7c671..a9717b3fbf1b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -110,6 +110,7 @@ extern int radeon_vm_block_size;
extern int radeon_deep_color;
extern int radeon_use_pflipirq;
extern int radeon_bapm;
+extern int radeon_backlight;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -589,9 +590,10 @@ bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
struct radeon_fence *fence);
-void radeon_semaphore_sync_resv(struct radeon_semaphore *semaphore,
- struct reservation_object *resv,
- bool shared);
+int radeon_semaphore_sync_resv(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ struct reservation_object *resv,
+ bool shared);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int waiting_ring);
@@ -712,7 +714,7 @@ struct radeon_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
- struct radeon_fence *fence;
+ struct fence *fence;
};
struct r500_irq_stat_regs {
@@ -1131,6 +1133,8 @@ struct radeon_wb {
#define R600_WB_EVENT_OFFSET 3072
#define CIK_WB_CP1_WPTR_OFFSET 3328
#define CIK_WB_CP2_WPTR_OFFSET 3584
+#define R600_WB_DMA_RING_TEST_OFFSET 3588
+#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592
/**
* struct radeon_pm - power management datas
@@ -2977,10 +2981,10 @@ struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
void r600_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
- bool enable);
+ u8 enable_mask);
void dce6_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
- bool enable);
+ u8 enable_mask);
/*
* R600 vram scratch functions
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index d91f965e8219..850de57069be 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -185,7 +185,6 @@ static struct radeon_asic_ring r100_gfx_ring = {
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
- .hdp_flush = &r100_ring_hdp_flush,
};
static struct radeon_asic r100_asic = {
@@ -332,7 +331,6 @@ static struct radeon_asic_ring r300_gfx_ring = {
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
- .hdp_flush = &r100_ring_hdp_flush,
};
static struct radeon_asic r300_asic = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index ca01bb8ea217..d8ace5b28a5b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -148,8 +148,7 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void r100_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
-void r100_ring_hdp_flush(struct radeon_device *rdev,
- struct radeon_ring *ring);
+
/*
* r200,rv250,rs300,rv280
*/
@@ -392,7 +391,6 @@ void r600_disable_interrupts(struct radeon_device *rdev);
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
-struct r600_audio_pin r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index e74c7e387dde..df69b92ba164 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -458,7 +458,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
return true;
}
-const int supported_devices_connector_convert[] = {
+static const int supported_devices_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_VGA,
DRM_MODE_CONNECTOR_DVII,
@@ -477,7 +477,7 @@ const int supported_devices_connector_convert[] = {
DRM_MODE_CONNECTOR_DisplayPort
};
-const uint16_t supported_devices_connector_object_id_convert[] = {
+static const uint16_t supported_devices_connector_object_id_convert[] = {
CONNECTOR_OBJECT_ID_NONE,
CONNECTOR_OBJECT_ID_VGA,
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */
@@ -494,7 +494,7 @@ const uint16_t supported_devices_connector_object_id_convert[] = {
CONNECTOR_OBJECT_ID_SVIDEO
};
-const int object_connector_convert[] = {
+static const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_DVII,
DRM_MODE_CONNECTOR_DVII,
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index a9fb0d016d38..8bc7d0bbd3c8 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -33,7 +33,6 @@ static struct radeon_atpx_priv {
bool atpx_detected;
/* handle for device - and atpx */
acpi_handle dhandle;
- acpi_handle other_handle;
struct radeon_atpx atpx;
} radeon_atpx_priv;
@@ -453,10 +452,9 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
return false;
status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
- if (ACPI_FAILURE(status)) {
- radeon_atpx_priv.other_handle = dhandle;
+ if (ACPI_FAILURE(status))
return false;
- }
+
radeon_atpx_priv.dhandle = dhandle;
radeon_atpx_priv.atpx.handle = atpx_handle;
return true;
@@ -540,16 +538,6 @@ static bool radeon_atpx_detect(void)
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
- /*
- * On some systems hotplug events are generated for the device
- * being switched off when ATPX is executed. They cause ACPI
- * hotplug to trigger and attempt to remove the device from
- * the system, which causes it to break down. Prevent that from
- * happening by setting the no_hotplug flag for the involved
- * ACPI device objects.
- */
- acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
- acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
return true;
}
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 1e8855060fc7..9e7f23dd14bd 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -93,7 +93,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
int time;
n = RADEON_BENCHMARK_ITERATIONS;
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, NULL, &sobj);
if (r) {
goto out_cleanup;
}
@@ -105,7 +105,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, NULL, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 6651177110f0..3e5f6b71f3ad 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -116,7 +116,7 @@ enum radeon_combios_connector {
CONNECTOR_UNSUPPORTED_LEGACY
};
-const int legacy_connector_convert[] = {
+static const int legacy_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_DVID,
DRM_MODE_CONNECTOR_VGA,
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f662de41ba49..1c893447d7cd 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -249,9 +249,9 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return 0;
}
-static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
+static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
- int i;
+ int i, r = 0;
for (i = 0; i < p->nrelocs; i++) {
struct reservation_object *resv;
@@ -260,9 +260,13 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
continue;
resv = p->relocs[i].robj->tbo.resv;
- radeon_semaphore_sync_resv(p->ib.semaphore, resv,
- p->relocs[i].tv.shared);
+ r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
+ p->relocs[i].tv.shared);
+
+ if (r)
+ break;
}
+ return r;
}
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -472,13 +476,19 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
return r;
}
+ r = radeon_cs_sync_rings(parser);
+ if (r) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to sync rings: %i\n", r);
+ return r;
+ }
+
if (parser->ring == R600_RING_TYPE_UVD_INDEX)
radeon_uvd_note_usage(rdev);
else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
(parser->ring == TN_RING_TYPE_VCE2_INDEX))
radeon_vce_note_usage(rdev);
- radeon_cs_sync_rings(parser);
r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
@@ -565,7 +575,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if (r) {
goto out;
}
- radeon_cs_sync_rings(parser);
+
+ r = radeon_cs_sync_rings(parser);
+ if (r) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to sync rings: %i\n", r);
+ goto out;
+ }
radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);
if ((rdev->family >= CHIP_TAHITI) &&
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e84a76e6656a..ea2676954dde 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -123,6 +123,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
* https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/
{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+ * https://bugzilla.kernel.org/show_bug.cgi?id=51381
+ */
+ { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* macbook pro 8.2 */
{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 },
@@ -430,7 +434,7 @@ int radeon_wb_init(struct radeon_device *rdev)
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, 0, NULL,
+ RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
&rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
@@ -1126,7 +1130,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
if (radeon_vm_block_size == -1) {
/* Total bits covered by PD + PTs */
- unsigned bits = ilog2(radeon_vm_size) + 17;
+ unsigned bits = ilog2(radeon_vm_size) + 18;
/* Make sure the PD is 4K in size up to 8GB address space.
Above that split equal between PD and PTs */
@@ -1396,7 +1400,7 @@ int radeon_device_init(struct radeon_device *rdev,
r = radeon_init(rdev);
if (r)
- return r;
+ goto failed;
r = radeon_gem_debugfs_init(rdev);
if (r) {
@@ -1412,7 +1416,7 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_agp_disable(rdev);
r = radeon_init(rdev);
if (r)
- return r;
+ goto failed;
}
r = radeon_ib_ring_tests(rdev);
@@ -1438,6 +1442,11 @@ int radeon_device_init(struct radeon_device *rdev,
DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
}
return 0;
+
+failed:
+ if (runtime)
+ vga_switcheroo_fini_domain_pm_ops(rdev->dev);
+ return r;
}
static void radeon_debugfs_remove_files(struct radeon_device *rdev);
@@ -1458,6 +1467,8 @@ void radeon_device_fini(struct radeon_device *rdev)
radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
vga_switcheroo_unregister_client(rdev->pdev);
+ if (rdev->flags & RADEON_IS_PX)
+ vga_switcheroo_fini_domain_pm_ops(rdev->dev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
if (rdev->rio_mem)
pci_iounmap(rdev->pdev, rdev->rio_mem);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 4eb37976f879..00ead8c2758a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -402,14 +402,21 @@ static void radeon_flip_work_func(struct work_struct *__work)
down_read(&rdev->exclusive_lock);
if (work->fence) {
- r = radeon_fence_wait(work->fence, false);
- if (r == -EDEADLK) {
- up_read(&rdev->exclusive_lock);
- do {
- r = radeon_gpu_reset(rdev);
- } while (r == -EAGAIN);
- down_read(&rdev->exclusive_lock);
- }
+ struct radeon_fence *fence;
+
+ fence = to_radeon_fence(work->fence);
+ if (fence && fence->rdev == rdev) {
+ r = radeon_fence_wait(fence, false);
+ if (r == -EDEADLK) {
+ up_read(&rdev->exclusive_lock);
+ do {
+ r = radeon_gpu_reset(rdev);
+ } while (r == -EAGAIN);
+ down_read(&rdev->exclusive_lock);
+ }
+ } else
+ r = fence_wait(work->fence, false);
+
if (r)
DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
@@ -418,7 +425,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
* confused about which BO the CRTC is scanning out
*/
- radeon_fence_unref(&work->fence);
+ fence_put(work->fence);
+ work->fence = NULL;
}
/* We borrow the event spin lock for protecting flip_status */
@@ -494,7 +502,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = (struct radeon_fence *)fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+ work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
@@ -576,7 +584,7 @@ pflip_cleanup:
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- radeon_fence_unref(&work->fence);
+ fence_put(work->fence);
kfree(work);
return r;
}
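The flip worker now keeps a generic struct fence. Before waiting it checks whether that fence is a radeon fence belonging to this device: only then may it use radeon_fence_wait() and the GPU-reset retry loop, otherwise it falls back to the generic fence_wait(), since a foreign fence (for example on a PRIME-shared buffer) cannot be waited on with this driver's locking assumptions. A reduced model of that dispatch; the types and helpers are stand-ins, not the DRM fence API:

#include <stdbool.h>
#include <stdio.h>

struct device { int id; };
struct fence { struct device *owner; };

static struct device this_device  = { .id = 1 };
static struct device other_device = { .id = 2 };

/* stand-in for to_radeon_fence(): NULL when the fence is not ours */
static struct fence *to_local_fence(struct fence *f, struct device *dev)
{
    return (f && f->owner == dev) ? f : NULL;
}

static void wait_flip_fence(struct fence *f)
{
    if (to_local_fence(f, &this_device))
        printf("driver wait path (may reset our own GPU on deadlock)\n");
    else
        printf("generic fence_wait() path for a foreign fence\n");
}

int main(void)
{
    struct fence ours   = { .owner = &this_device };
    struct fence theirs = { .owner = &other_device };

    wait_flip_fence(&ours);
    wait_flip_fence(&theirs);
    return 0;
}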
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 69c6a835bcd5..dcffa30ee2db 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -85,7 +85,7 @@
* CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
* 2.39.0 - Add INFO query for number of active CUs
* 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
- * CS to GPU
+ * CS to GPU on >= r600
*/
#define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 40
@@ -186,6 +186,7 @@ int radeon_vm_block_size = -1;
int radeon_deep_color = 0;
int radeon_use_pflipirq = 2;
int radeon_bapm = -1;
+int radeon_backlight = -1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -268,6 +269,9 @@ module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(bapm, radeon_bapm, int, 0444);
+MODULE_PARM_DESC(backlight, "backlight support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(backlight, radeon_backlight, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
@@ -446,6 +450,7 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
ret = radeon_suspend_kms(drm_dev, false, false);
pci_save_state(pdev);
pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3cold);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 3c2094c25b53..9a19e52cc655 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -158,10 +158,43 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
return ret;
}
+static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = radeon_encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ bool use_bl = false;
+
+ if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)))
+ return;
+
+ if (radeon_backlight == 0) {
+ return;
+ } else if (radeon_backlight == 1) {
+ use_bl = true;
+ } else if (radeon_backlight == -1) {
+ /* Quirks */
+ /* Amilo Xi 2550 only works with acpi bl */
+ if ((rdev->pdev->device == 0x9583) &&
+ (rdev->pdev->subsystem_vendor == 0x1734) &&
+ (rdev->pdev->subsystem_device == 0x1107))
+ use_bl = false;
+ else
+ use_bl = true;
+ }
+
+ if (use_bl) {
+ if (rdev->is_atom_bios)
+ radeon_atom_backlight_init(radeon_encoder, connector);
+ else
+ radeon_legacy_backlight_init(radeon_encoder, connector);
+ rdev->mode_info.bl_encoder = radeon_encoder;
+ }
+}
+
void
radeon_link_encoder_connector(struct drm_device *dev)
{
- struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct drm_encoder *encoder;
@@ -174,13 +207,8 @@ radeon_link_encoder_connector(struct drm_device *dev)
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->devices & radeon_connector->devices) {
drm_mode_connector_attach_encoder(connector, encoder);
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (rdev->is_atom_bios)
- radeon_atom_backlight_init(radeon_encoder, connector);
- else
- radeon_legacy_backlight_init(radeon_encoder, connector);
- rdev->mode_info.bl_encoder = radeon_encoder;
- }
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_encoder_add_backlight(radeon_encoder, connector);
}
}
}
@@ -382,3 +410,24 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
}
}
+bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ return true;
+ default:
+ return false;
+ }
+}
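Backlight registration moves into radeon_encoder_add_backlight(), gated by the new radeon.backlight module parameter: 0 skips the native backlight, 1 forces it, and -1 (auto) applies quirks such as the Amilo Xi 2550, which only works with the ACPI backlight. A compact model of that decision; the PCI IDs are the ones quoted in the hunk, everything else is a stand-in:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pci_ids { uint16_t device, subsystem_vendor, subsystem_device; };

/* mirrors the new module parameter: -1 = auto, 0 = off, 1 = on */
static int radeon_backlight = -1;

static bool use_native_backlight(const struct pci_ids *id)
{
    if (radeon_backlight == 0)
        return false;
    if (radeon_backlight == 1)
        return true;

    /* auto: Amilo Xi 2550 only works with the ACPI backlight */
    if (id->device == 0x9583 &&
        id->subsystem_vendor == 0x1734 &&
        id->subsystem_device == 0x1107)
        return false;

    return true;
}

int main(void)
{
    struct pci_ids amilo = { 0x9583, 0x1734, 0x1107 };
    struct pci_ids other = { 0x6840, 0x1043, 0x2122 };

    printf("amilo: %d  other: %d\n",
           use_native_backlight(&amilo), use_native_backlight(&other));
    return 0;
}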
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index af9f2d6bd7d0..995167025282 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -541,6 +541,15 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
uint64_t seq[RADEON_NUM_RINGS] = {};
long r;
+ /*
+ * This function should not be called on !radeon fences.
+ * If this is the case, it would mean this function can
+ * also be called on radeon fences belonging to another card.
+ * exclusive_lock is not held in that case.
+ */
+ if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
+ return fence_wait(&fence->base, intr);
+
seq[fence->ring] = fence->seq;
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
if (r < 0) {
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a053a0779aac..84146d5901aa 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -128,7 +128,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- 0, NULL, &rdev->gart.robj);
+ 0, NULL, NULL, &rdev->gart.robj);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 4b7c8ec36c2f..c194497aa586 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -67,7 +67,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
retry:
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
- flags, NULL, &robj);
+ flags, NULL, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e27608c29c11..04db2fdd8692 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -777,6 +777,7 @@ extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action);
extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
+extern bool radeon_encoder_is_digital(struct drm_encoder *encoder);
extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0e82f0223fd4..99a960a4f302 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -167,8 +167,10 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
}
int radeon_bo_create(struct radeon_device *rdev,
- unsigned long size, int byte_align, bool kernel, u32 domain,
- u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
+ unsigned long size, int byte_align, bool kernel,
+ u32 domain, u32 flags, struct sg_table *sg,
+ struct reservation_object *resv,
+ struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
@@ -216,7 +218,7 @@ int radeon_bo_create(struct radeon_device *rdev,
down_read(&rdev->pm.mclk_lock);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
- acc_size, sg, NULL, &radeon_ttm_bo_destroy);
+ acc_size, sg, resv, &radeon_ttm_bo_destroy);
up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 98a47fdf3625..1b8ec7917154 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -126,6 +126,7 @@ extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u32 flags,
struct sg_table *sg,
+ struct reservation_object *resv,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 171daf7fc483..f3609c97496b 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -61,12 +61,15 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg)
{
+ struct reservation_object *resv = attach->dmabuf->resv;
struct radeon_device *rdev = dev->dev_private;
struct radeon_bo *bo;
int ret;
+ ww_mutex_lock(&resv->lock, NULL);
ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
- RADEON_GEM_DOMAIN_GTT, 0, sg, &bo);
+ RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
+ ww_mutex_unlock(&resv->lock);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 6f2a9bd6bb54..3d17af34afa7 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -383,7 +383,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, 0,
+ RADEON_GEM_DOMAIN_GTT, 0, NULL,
NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index b84f97c8718c..c507896aca45 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -65,7 +65,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
}
r = radeon_bo_create(rdev, size, align, true,
- domain, flags, NULL, &sa_manager->bo);
+ domain, flags, NULL, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 4d4b0773638a..6deb08f045b7 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -124,27 +124,42 @@ void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
*
* Sync to the fence using this semaphore object
*/
-void radeon_semaphore_sync_resv(struct radeon_semaphore *sema,
- struct reservation_object *resv,
- bool shared)
+int radeon_semaphore_sync_resv(struct radeon_device *rdev,
+ struct radeon_semaphore *sema,
+ struct reservation_object *resv,
+ bool shared)
{
struct reservation_object_list *flist;
struct fence *f;
+ struct radeon_fence *fence;
unsigned i;
+ int r = 0;
/* always sync to the exclusive fence */
f = reservation_object_get_excl(resv);
- radeon_semaphore_sync_fence(sema, (struct radeon_fence*)f);
+ fence = f ? to_radeon_fence(f) : NULL;
+ if (fence && fence->rdev == rdev)
+ radeon_semaphore_sync_fence(sema, fence);
+ else if (f)
+ r = fence_wait(f, true);
flist = reservation_object_get_list(resv);
- if (shared || !flist)
- return;
+ if (shared || !flist || r)
+ return r;
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
reservation_object_held(resv));
- radeon_semaphore_sync_fence(sema, (struct radeon_fence*)f);
+ fence = to_radeon_fence(f);
+ if (fence && fence->rdev == rdev)
+ radeon_semaphore_sync_fence(sema, fence);
+ else
+ r = fence_wait(f, true);
+
+ if (r)
+ break;
}
+ return r;
}
/**
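radeon_semaphore_sync_resv() gains the rdev argument and an int return so it can tell local fences apart from foreign ones: a radeon fence owned by this device is queued on the semaphore for a cheap in-ring wait, anything else (including radeon fences from another card) is waited on with fence_wait(), and any error propagates back to the CS path, which now checks it. A small model of that per-fence dispatch; the reservation-object walk and fence types are simplified stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct device { int id; };
struct fence { struct device *owner; };

static int fence_wait_blocking(struct fence *f)
{
    printf("CPU wait on fence owned by device %d\n", f->owner->id);
    return 0;    /* pretend the wait succeeded */
}

static void semaphore_queue(struct fence *f)
{
    (void)f;
    printf("queued local fence for GPU-side sync\n");
}

static int sync_to_fences(struct device *dev, struct fence **fences, int n)
{
    int i, r = 0;

    for (i = 0; i < n; i++) {
        struct fence *f = fences[i];

        if (!f)
            continue;
        if (f->owner == dev)
            semaphore_queue(f);          /* cheap GPU-side wait */
        else
            r = fence_wait_blocking(f);  /* blocking wait on a foreign fence */
        if (r)
            break;                       /* propagate the error to the caller */
    }
    return r;
}

int main(void)
{
    struct device us = { 1 }, them = { 2 };
    struct fence a = { &us }, b = { &them };
    struct fence *list[] = { &a, &b };

    return sync_to_fences(&us, list, 2);
}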
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index ce943e1a5e51..07b506b41008 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -67,7 +67,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
}
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- 0, NULL, &vram_obj);
+ 0, NULL, NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -87,7 +87,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
struct radeon_fence *fence = NULL;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
+ RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
+ gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 738a2f248b36..8624979afb65 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -865,7 +865,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, 0,
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &rdev->stollen_vga_memory);
if (r) {
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index ba4f38916026..11b662469253 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -141,7 +141,8 @@ int radeon_uvd_init(struct radeon_device *rdev)
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
RADEON_GPU_PAGE_SIZE;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ NULL, &rdev->uvd.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index c7190aadbd89..9e85757d5599 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -126,7 +126,8 @@ int radeon_vce_init(struct radeon_device *rdev)
size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL,
+ &rdev->vce.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index ce870959dff8..4532cc76a0a6 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -548,7 +548,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt);
+ RADEON_GEM_DOMAIN_VRAM, 0,
+ NULL, NULL, &pt);
if (r)
return r;
@@ -698,7 +699,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
if (ib.length_dw != 0) {
radeon_asic_vm_pad_ib(rdev, &ib);
- radeon_semaphore_sync_resv(ib.semaphore, pd->tbo.resv, false);
+ radeon_semaphore_sync_resv(rdev, ib.semaphore, pd->tbo.resv, false);
radeon_semaphore_sync_fence(ib.semaphore, vm->last_id_use);
WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
@@ -825,7 +826,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
unsigned nptes;
uint64_t pte;
- radeon_semaphore_sync_resv(ib->semaphore, pt->tbo.resv, false);
+ radeon_semaphore_sync_resv(rdev, ib->semaphore, pt->tbo.resv, false);
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -1127,7 +1128,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
r = radeon_bo_create(rdev, pd_size, align, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
- &vm->page_directory);
+ NULL, &vm->page_directory);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 6c1fc339d228..c5799f16aa4b 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -221,9 +221,9 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4);
if (flags & RADEON_GART_PAGE_READ)
- addr |= RS400_PTE_READABLE;
+ entry |= RS400_PTE_READABLE;
if (flags & RADEON_GART_PAGE_WRITE)
- addr |= RS400_PTE_WRITEABLE;
+ entry |= RS400_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
entry |= RS400_PTE_UNSNOOPED;
entry = cpu_to_le32(entry);
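The rs400 GART change is a wrong-variable fix: the read/write permission flags were OR'd into the page address (addr) instead of the entry being built, so they never reached the page-table entry. A minimal reproduction of the corrected entry construction; the bit positions here are illustrative placeholders, not the real RS400 register layout:

#include <stdint.h>
#include <stdio.h>

/* illustrative bit positions; the real values live in the rs400 headers */
#define PTE_READABLE   (1u << 0)
#define PTE_WRITEABLE  (1u << 1)
#define PTE_UNSNOOPED  (1u << 2)

#define GART_PAGE_READ  (1u << 0)
#define GART_PAGE_WRITE (1u << 1)
#define GART_PAGE_SNOOP (1u << 2)

static uint32_t build_pte(uint64_t addr, uint32_t flags)
{
    uint32_t entry = ((uint32_t)addr & ~0xfffu) |
                     (((uint32_t)(addr >> 32) & 0xff) << 4);

    /* the fix: OR the permission bits into entry, not into addr */
    if (flags & GART_PAGE_READ)
        entry |= PTE_READABLE;
    if (flags & GART_PAGE_WRITE)
        entry |= PTE_WRITEABLE;
    if (!(flags & GART_PAGE_SNOOP))
        entry |= PTE_UNSNOOPED;
    return entry;
}

int main(void)
{
    printf("pte = 0x%08x\n",
           build_pte(0x12345000ull, GART_PAGE_READ | GART_PAGE_WRITE));
    return 0;
}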
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 02f7710de470..9031f4b69824 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -24,6 +24,7 @@
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "rs780d.h"
#include "r600_dpm.h"
#include "rs780_dpm.h"
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index e7045b085715..6a5c233361e9 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -24,6 +24,7 @@
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "rv6xxd.h"
#include "r600_dpm.h"
#include "rv6xx_dpm.h"
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index d9f5ce715c9b..372016e266d0 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -26,7 +26,6 @@
* Jerome Glisse
*/
#include <linux/firmware.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index c112764adfdf..7f34bad2e724 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -67,7 +67,7 @@ struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 3c76e1dcdf04..755a8f96fe46 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -24,6 +24,7 @@
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "rv770d.h"
#include "r600_dpm.h"
#include "rv770_dpm.h"
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 6bce40847753..eeea5b6a1775 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4684,7 +4684,7 @@ static int si_vm_packet3_compute_check(struct radeon_device *rdev,
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
int ret = 0;
- u32 idx = 0;
+ u32 idx = 0, i;
struct radeon_cs_packet pkt;
do {
@@ -4695,6 +4695,12 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
switch (pkt.type) {
case RADEON_PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
+ for (i = 0; i < ib->length_dw; i++) {
+ if (i == idx)
+ printk("\t0x%08x <---\n", ib->ptr[i]);
+ else
+ printk("\t0x%08x\n", ib->ptr[i]);
+ }
ret = -EINVAL;
break;
case RADEON_PACKET_TYPE2:
@@ -6316,17 +6322,17 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
+ wptr &= ~RB_OVERFLOW;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 16). Hopefully
* this should allow us to catchup.
*/
- dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
- wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
- wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
@@ -6664,13 +6670,13 @@ restart_ih:
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
+ WREG32(IH_RB_RPTR, rptr);
}
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_thermal && rdev->pm.dpm_enabled)
schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
- WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
/* make sure wptr hasn't changed while processing */
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 9b0dfbc913f3..b58f12b762d7 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -252,7 +252,7 @@ struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
return ERR_PTR(r);
}
- radeon_semaphore_sync_resv(sem, resv, false);
+ radeon_semaphore_sync_resv(rdev, sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 70e61ffeace2..a53c2e79d9cb 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -23,6 +23,7 @@
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "sid.h"
#include "r600_dpm.h"
#include "si_dpm.h"
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index fd414d34d885..6635da9ec986 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -736,7 +736,7 @@
# define DESCRIPTION16(x) (((x) & 0xff) << 0)
# define DESCRIPTION17(x) (((x) & 0xff) << 8)
-#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54
+#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
# define AUDIO_ENABLED (1 << 31)
#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 3f0e8d7b8dbe..1f8a8833e1be 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -23,6 +23,7 @@
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "sumod.h"
#include "r600_dpm.h"
#include "cypress_dpm.h"
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 57f780053b3e..b4ec5c4e7969 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -23,6 +23,7 @@
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "trinityd.h"
#include "r600_dpm.h"
#include "trinity_dpm.h"
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index ef93156a69c6..b22968c08d1f 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -298,7 +298,6 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
val = frame[0xC];
- val |= frame[0xD] << 8;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
/* Enable transmission slot for AVI infoframe
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8f5cec67c47d..d395b0bef73b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -709,6 +709,7 @@ out:
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
+ const struct ttm_place *place,
bool interruptible,
bool no_wait_gpu)
{
@@ -720,8 +721,21 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
ret = __ttm_bo_reserve(bo, false, true, false, NULL);
- if (!ret)
+ if (!ret) {
+ if (place && (place->fpfn || place->lpfn)) {
+ /* Don't evict this BO if it's outside of the
+ * requested placement range
+ */
+ if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
+ (place->lpfn && place->lpfn <= bo->mem.start)) {
+ __ttm_bo_unreserve(bo);
+ ret = -EBUSY;
+ continue;
+ }
+ }
+
break;
+ }
}
if (ret) {
@@ -782,7 +796,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
if (mem->mm_node)
break;
- ret = ttm_mem_evict_first(bdev, mem_type,
+ ret = ttm_mem_evict_first(bdev, mem_type, place,
interruptible, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
@@ -994,9 +1008,9 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
for (i = 0; i < placement->num_placement; i++) {
const struct ttm_place *heap = &placement->placement[i];
- if (mem->mm_node && heap->lpfn != 0 &&
+ if (mem->mm_node &&
(mem->start < heap->fpfn ||
- mem->start + mem->num_pages > heap->lpfn))
+ (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
continue;
*new_flags = heap->flags;
@@ -1007,9 +1021,9 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
for (i = 0; i < placement->num_busy_placement; i++) {
const struct ttm_place *heap = &placement->busy_placement[i];
- if (mem->mm_node && heap->lpfn != 0 &&
+ if (mem->mm_node &&
(mem->start < heap->fpfn ||
- mem->start + mem->num_pages > heap->lpfn))
+ (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
continue;
*new_flags = heap->flags;
@@ -1233,7 +1247,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
if (ret) {
if (allow_errors) {
return ret;
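
The ttm_bo.c changes above thread the caller's requested placement (a struct ttm_place) down into ttm_mem_evict_first(), so the eviction loop can skip buffers that lie entirely outside the requested fpfn/lpfn window, and they rework ttm_bo_mem_compat() so that lpfn == 0 means "no upper bound" rather than disabling the range check altogether. A minimal sketch of the overlap test implied by the two checks, in the same page-frame units as fpfn/lpfn (the helper name is illustrative, not part of TTM):

    /* A placement window [fpfn, lpfn) overlaps a buffer occupying pages
     * [start, start + num_pages) unless the buffer ends before the window
     * starts or begins after the window ends; lpfn == 0 means "no upper
     * limit".
     */
    static bool place_overlaps(unsigned long fpfn, unsigned long lpfn,
                               unsigned long start, unsigned long num_pages)
    {
            if (fpfn >= start + num_pages)
                    return false;   /* buffer entirely below the window */
            if (lpfn && lpfn <= start)
                    return false;   /* buffer entirely above the window */
            return true;
    }
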
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 11323dd5196f..e4259c2c1acc 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -35,7 +35,6 @@
/*
* PCI device IDs.
*/
-#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
/*
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
index 2f228a2f2a48..aefdff95356d 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -1,7 +1,8 @@
config IMX_IPUV3_CORE
tristate "IPUv3 core support"
- depends on SOC_IMX5 || SOC_IMX6Q || SOC_IMX6SL || ARCH_MULTIPLATFORM
+ depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
depends on RESET_CONTROLLER
+ select GENERIC_IRQ_CHIP
help
Choose this if you have a i.MX5/6 system and want to use the Image
Processing Unit. This option only enables IPU base support.
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index df65d2bca522..f707d25ae78f 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1060,8 +1060,10 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
id++, &reg->pdata, sizeof(reg->pdata));
}
- if (IS_ERR(pdev))
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
goto err_register;
+ }
}
return 0;
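
The ipu-common.c hunk fixes the error path when registering a client platform device: on failure, the registration call a few lines up returns an ERR_PTR-encoded pointer, and the code now converts it with PTR_ERR() so the err_register label unwinds with the real errno instead of whatever 'ret' happened to contain. A minimal sketch of the general pattern (the helper is illustrative only):

    #include <linux/err.h>

    /* Pointer-returning kernel APIs encode failures as ERR_PTR(-errno);
     * the caller must test with IS_ERR() and recover the errno with
     * PTR_ERR() before jumping to its cleanup label.
     */
    static int check_pdev(struct platform_device *pdev)
    {
            if (IS_ERR(pdev))
                    return PTR_ERR(pdev);  /* e.g. -ENOMEM or -EINVAL */
            return 0;
    }
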
diff --git a/drivers/gpu/ipu-v3/ipu-smfc.c b/drivers/gpu/ipu-v3/ipu-smfc.c
index 6ca9b43ce25a..4ef910991413 100644
--- a/drivers/gpu/ipu-v3/ipu-smfc.c
+++ b/drivers/gpu/ipu-v3/ipu-smfc.c
@@ -8,7 +8,6 @@
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
-#define DEBUG
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 6866448083b2..37ac7b5dbd06 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -660,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *
}
EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
+{
+ dev->pm_domain = NULL;
+}
+EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
+
static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
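
The vga_switcheroo hunk adds an exported teardown counterpart to vga_switcheroo_init_domain_pm_ops(): vga_switcheroo_fini_domain_pm_ops() simply clears dev->pm_domain, so a driver that installed switcheroo's PM domain at bind time can drop the pointer when it unbinds. A minimal usage sketch, with the driver callbacks and domain purely hypothetical:

    /* Pair the existing init with the new fini so dev->pm_domain never
     * points at stale data after the driver detaches. 'my_pm_domain'
     * and the probe/remove functions are illustrative only.
     */
    static struct dev_pm_domain my_pm_domain;

    static int my_probe(struct pci_dev *pdev)
    {
            return vga_switcheroo_init_domain_pm_ops(&pdev->dev, &my_pm_domain);
    }

    static void my_remove(struct pci_dev *pdev)
    {
            vga_switcheroo_fini_domain_pm_ops(&pdev->dev);
    }
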
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index d2077f040f3e..7bcbf863656e 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -41,6 +41,7 @@
#include <linux/poll.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
+#include <linux/screen_info.h>
#include <linux/uaccess.h>
@@ -112,10 +113,8 @@ both:
return 1;
}
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
/* this is only used a cookie - it should not be dereferenced */
static struct pci_dev *vga_default;
-#endif
static void vga_arb_device_card_gone(struct pci_dev *pdev);
@@ -131,7 +130,6 @@ static struct vga_device *vgadev_find(struct pci_dev *pdev)
}
/* Returns the default VGA device (vgacon's babe) */
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
struct pci_dev *vga_default_device(void)
{
return vga_default;
@@ -147,7 +145,6 @@ void vga_set_default_device(struct pci_dev *pdev)
pci_dev_put(vga_default);
vga_default = pci_dev_get(pdev);
}
-#endif
static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
{
@@ -403,7 +400,6 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
}
schedule();
remove_wait_queue(&vga_wait_queue, &wait);
- set_current_state(TASK_RUNNING);
}
return rc;
}
@@ -583,11 +579,12 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
/* Deal with VGA default device. Use first enabled one
* by default if arch doesn't have it's own hook
*/
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
if (vga_default == NULL &&
- ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK))
+ ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
+ pr_info("vgaarb: setting as boot device: PCI:%s\n",
+ pci_name(pdev));
vga_set_default_device(pdev);
-#endif
+ }
vga_arbiter_check_bridge_sharing(vgadev);
@@ -621,10 +618,8 @@ static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
goto bail;
}
-#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
if (vga_default == pdev)
vga_set_default_device(NULL);
-#endif
if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
vga_decode_count--;
@@ -1320,6 +1315,38 @@ static int __init vga_arb_device_init(void)
pr_info("vgaarb: loaded\n");
list_for_each_entry(vgadev, &vga_list, list) {
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+ /* Override I/O based detection done by vga_arbiter_add_pci_device()
+ * as it may take the wrong device (e.g. on Apple system under EFI).
+ *
+ * Select the device owning the boot framebuffer if there is one.
+ */
+ resource_size_t start, end;
+ int i;
+
+ /* Does firmware framebuffer belong to us? */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM))
+ continue;
+
+ start = pci_resource_start(vgadev->pdev, i);
+ end = pci_resource_end(vgadev->pdev, i);
+
+ if (!start || !end)
+ continue;
+
+ if (screen_info.lfb_base < start ||
+ (screen_info.lfb_base + screen_info.lfb_size) >= end)
+ continue;
+ if (!vga_default_device())
+ pr_info("vgaarb: setting as boot device: PCI:%s\n",
+ pci_name(vgadev->pdev));
+ else if (vgadev->pdev != vga_default_device())
+ pr_info("vgaarb: overriding boot device: PCI:%s\n",
+ pci_name(vgadev->pdev));
+ vga_set_default_device(vgadev->pdev);
+ }
+#endif
if (vgadev->bridge_has_one_vga)
pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev));
else