Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  377
1 file changed, 123 insertions(+), 254 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8ab5ccbc14ac..bd79d0a31942 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -62,6 +62,8 @@
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
@@ -652,71 +654,6 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
}
/**
- * amdgpu_device_vram_location - try to find VRAM location
- *
- * @adev: amdgpu device structure holding all necessary information
- * @mc: memory controller structure holding memory information
- * @base: base address at which to put VRAM
- *
- * Function will try to place VRAM at the base address provided
- * as a parameter.
- */
-void amdgpu_device_vram_location(struct amdgpu_device *adev,
- struct amdgpu_gmc *mc, u64 base)
-{
- uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
-
- mc->vram_start = base;
- mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- if (limit && limit < mc->real_vram_size)
- mc->real_vram_size = limit;
- dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
- mc->mc_vram_size >> 20, mc->vram_start,
- mc->vram_end, mc->real_vram_size >> 20);
-}
-
-/**
- * amdgpu_device_gart_location - try to find GART location
- *
- * @adev: amdgpu device structure holding all necessary information
- * @mc: memory controller structure holding memory information
- *
- * Function will try to place GART before or after VRAM.
- *
- * If the GART size is bigger than the space left, we adjust the GART size.
- * Thus this function will never fail.
- */
-void amdgpu_device_gart_location(struct amdgpu_device *adev,
- struct amdgpu_gmc *mc)
-{
- u64 size_af, size_bf;
-
- mc->gart_size += adev->pm.smu_prv_buffer_size;
-
- size_af = adev->gmc.mc_mask - mc->vram_end;
- size_bf = mc->vram_start;
- if (size_bf > size_af) {
- if (mc->gart_size > size_bf) {
- dev_warn(adev->dev, "limiting GART\n");
- mc->gart_size = size_bf;
- }
- mc->gart_start = 0;
- } else {
- if (mc->gart_size > size_af) {
- dev_warn(adev->dev, "limiting GART\n");
- mc->gart_size = size_af;
- }
- /* VCE doesn't like it when BOs cross a 4GB segment, so align
- * the GART base on a 4GB boundary as well.
- */
- mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
- }
- mc->gart_end = mc->gart_start + mc->gart_size - 1;
- dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
- mc->gart_size >> 20, mc->gart_start, mc->gart_end);
-}
-
-/**
* amdgpu_device_resize_fb_bar - try to resize FB BAR
*
* @adev: amdgpu_device pointer
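[Editor's note] The two helpers removed above computed where VRAM and GART sit in the GPU's internal address space; they appear to move to amdgpu_gmc.c (as amdgpu_gmc_vram_location()/amdgpu_gmc_gart_location()) elsewhere in this series rather than being dropped. A minimal standalone sketch of the GART placement arithmetic, with simplified, hypothetical names (not the kernel code itself):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the removed GART-placement logic: put GART in whichever
 * hole (before or after VRAM) is larger, shrinking it if needed. */
static void place_gart(uint64_t mc_mask, uint64_t vram_start,
		       uint64_t vram_end, uint64_t *gart_start,
		       uint64_t *gart_size)
{
	uint64_t size_af = mc_mask - vram_end;	/* space after VRAM */
	uint64_t size_bf = vram_start;		/* space before VRAM */

	if (size_bf > size_af) {
		if (*gart_size > size_bf)
			*gart_size = size_bf;	/* limit GART to fit */
		*gart_start = 0;
	} else {
		if (*gart_size > size_af)
			*gart_size = size_af;
		/* align the base to a 4GB boundary, see below */
		*gart_start = (vram_end + 1 + 0xFFFFFFFFULL) &
			      ~0xFFFFFFFFULL;
	}
}

int main(void)
{
	uint64_t start = 0, size = 512ULL << 20;

	place_gart((1ULL << 44) - 1, 0, (8ULL << 30) - 1, &start, &size);
	printf("GART at 0x%016llx, %lluM\n",
	       (unsigned long long)start, (unsigned long long)(size >> 20));
	return 0;
}

The 4GB alignment mirrors the comment in the removed code: VCE misbehaves when a buffer object straddles a 4GB segment, so the GART base itself is kept on a 4GB boundary.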
@@ -1397,7 +1334,12 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "vega12";
break;
case CHIP_RAVEN:
- chip_name = "raven";
+ if (adev->rev_id >= 8)
+ chip_name = "raven2";
+ else if (adev->pdev->device == 0x15d8)
+ chip_name = "picasso";
+ else
+ chip_name = "raven";
break;
}
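[Editor's note] In the Raven handling added here, a rev_id of 8 or higher identifies Raven2 and PCI device ID 0x15d8 identifies Picasso, matching the two MODULE_FIRMWARE() declarations added at the top of this diff (those declarations are what lets modinfo and initramfs tooling discover the new blobs). Further down, the existing code consumes chip_name roughly like the following sketch (the buffer size is an assumption):

	char fw_name[30];
	int err;

	/* compose the firmware path from chip_name and fetch it */
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin",
		 chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name,
			       adev->dev);
	if (err)
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n", fw_name);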
@@ -1551,6 +1493,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
}
adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
+ if (amdgpu_sriov_vf(adev))
+ adev->powerplay.pp_feature &= ~PP_GFXOFF_MASK;
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
@@ -1651,6 +1595,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = true;
}
+ amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
if (amdgpu_sriov_vf(adev))
@@ -1690,24 +1635,27 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
}
/**
- * amdgpu_device_ip_late_set_cg_state - late init for clockgating
+ * amdgpu_device_set_cg_state - set clockgating for amdgpu device
*
* @adev: amdgpu_device pointer
*
- * Late initialization pass enabling clockgating for hardware IPs.
* The list of all the hardware IPs that make up the asic is walked and the
- * set_clockgating_state callbacks are run. This stage is run late
- * in the init process.
+ * set_clockgating_state callbacks are run.
+ * During late init this pass enables clockgating for the hardware IPs;
+ * during fini or suspend the same walk disables clockgating again.
* Returns 0 on success, negative error code on failure.
*/
-static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
+
+static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
{
- int i = 0, r;
+ int i, j, r;
if (amdgpu_emu_mode == 1)
return 0;
- for (i = 0; i < adev->num_ip_blocks; i++) {
+ for (j = 0; j < adev->num_ip_blocks; j++) {
+ i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.valid)
continue;
/* skip CG for VCE/UVD, it's handled specially */
@@ -1717,7 +1665,7 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* enable clockgating to save power */
r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_GATE);
+ state);
if (r) {
DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -1729,14 +1677,15 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
+static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
{
- int i = 0, r;
+ int i, j, r;
if (amdgpu_emu_mode == 1)
return 0;
- for (i = 0; i < adev->num_ip_blocks; i++) {
+ for (j = 0; j < adev->num_ip_blocks; j++) {
+ i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.valid)
continue;
/* skip CG for VCE/UVD, it's handled specially */
@@ -1746,7 +1695,7 @@ static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
- AMD_PG_STATE_GATE);
+ state);
if (r) {
DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
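[Editor's note] The one non-obvious line in both rewritten walkers is the index selection: gating visits the IP blocks front to back, ungating visits them back to front, so blocks are ungated in the reverse of the order they were gated. A tiny standalone sketch of the trick:

#include <stdio.h>

enum state { GATE, UNGATE };

/* Mirrors i = state == AMD_CG_STATE_GATE ? j : num - j - 1 from the
 * patch; prints the visit order for four blocks. */
static void walk(int num_blocks, enum state s)
{
	int i, j;

	for (j = 0; j < num_blocks; j++) {
		i = (s == GATE) ? j : num_blocks - j - 1;
		printf("%d ", i);
	}
	printf("\n");
}

int main(void)
{
	walk(4, GATE);   /* 0 1 2 3 */
	walk(4, UNGATE); /* 3 2 1 0 */
	return 0;
}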
@@ -1787,8 +1736,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
}
}
- amdgpu_device_ip_late_set_cg_state(adev);
- amdgpu_device_ip_late_set_pg_state(adev);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
queue_delayed_work(system_wq, &adev->late_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
@@ -1814,22 +1763,15 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
int i, r;
amdgpu_amdkfd_device_fini(adev);
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
- adev->ip_blocks[i].version->funcs->set_clockgating_state) {
- /* ungate blocks before hw fini so that we can shutdown the blocks safely */
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- return r;
- }
- if (adev->powerplay.pp_funcs->set_powergating_by_smu)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
@@ -1845,20 +1787,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.hw)
continue;
- if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
- adev->ip_blocks[i].version->funcs->set_clockgating_state) {
- /* ungate blocks before hw fini so that we can shutdown the blocks safely */
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- return r;
- }
- }
-
r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
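[Editor's note] With the inline ungating hoisted out, amdgpu_device_ip_fini() reduces to the two-pass teardown the "need to disable SMC first" comment describes: one forward pass that runs hw_fini() only for the SMC block, then the usual reverse-order pass for everything else. A simplified sketch of that shape (error handling and status bookkeeping elided; not the literal kernel code):

	/* pass 1: shut down only the SMC block */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC)
			continue;
		adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		adev->ip_blocks[i].status.hw = false;
	}

	/* pass 2: everything else, in reverse init order */
	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		adev->ip_blocks[i].status.hw = false;
	}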
@@ -1906,13 +1834,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
}
/**
- * amdgpu_device_ip_late_init_func_handler - work handler for clockgating
- *
- * @work: work_struct
+ * amdgpu_device_ip_late_init_func_handler - work handler for ib test
*
- * Work handler for amdgpu_device_ip_late_set_cg_state. We put the
- * clockgating setup into a worker thread to speed up driver init and
- * resume from suspend.
+ * @work: work_struct.
*/
static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
@@ -1925,6 +1849,19 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
DRM_ERROR("ib ring test failed (%d).\n", r);
}
+static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+ if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
+ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+ adev->gfx.gfx_off_state = true;
+ }
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+}
+
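[Editor's note] The new handler is the "enable" half of a reference-counted GFXOFF mechanism: it only powers the GFX core down once no one holds a request against it. Note that gfx_off_req_count is initialized to 1 later in this patch, so GFXOFF stays disabled until some later init step drops the count to zero. The companion control helper appears to be added elsewhere in this series; a hedged sketch of how it plausibly pairs with this work item (GFX_OFF_DELAY_ENABLE is an assumed delay constant, and the placement in amdgpu_gfx.c is an assumption):

void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	if (!(adev->powerplay.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (!enable)
		adev->gfx.gfx_off_req_count++;
	else if (adev->gfx.gfx_off_req_count > 0)
		adev->gfx.gfx_off_req_count--;

	if (enable && !adev->gfx.gfx_off_state &&
	    !adev->gfx.gfx_off_req_count) {
		/* defer the actual enable to the delayed work above */
		schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
				      GFX_OFF_DELAY_ENABLE);
	} else if (!enable && adev->gfx.gfx_off_state) {
		if (!amdgpu_dpm_set_powergating_by_smu(adev,
					AMD_IP_BLOCK_TYPE_GFX, false))
			adev->gfx.gfx_off_state = false;
	}

	mutex_unlock(&adev->gfx.gfx_off_mutex);
}

Routing the enable through the delayed work keeps brief idle gaps between submissions from bouncing GFXOFF on and off.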
/**
* amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
*
@@ -1940,23 +1877,14 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_request_full_gpu(adev, false);
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
/* displays are handled separately */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
- /* ungate blocks so that suspend can properly shut them down */
- if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
- }
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
@@ -1967,9 +1895,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
}
}
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_release_full_gpu(adev, false);
-
return 0;
}
@@ -1988,36 +1913,12 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
int i, r;
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_request_full_gpu(adev, false);
-
- /* ungate SMC block first */
- r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
- AMD_CG_STATE_UNGATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
- }
-
- /* call smu to disable gfx off feature first when suspend */
- if (adev->powerplay.pp_funcs->set_powergating_by_smu)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
-
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
/* displays are handled in phase1 */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
continue;
- /* ungate blocks so that suspend can properly shut them down */
- if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
- adev->ip_blocks[i].version->funcs->set_clockgating_state) {
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
- }
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
@@ -2027,9 +1928,6 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
}
}
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_release_full_gpu(adev, false);
-
return 0;
}
@@ -2048,11 +1946,17 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
int r;
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
+
r = amdgpu_device_ip_suspend_phase1(adev);
if (r)
return r;
r = amdgpu_device_ip_suspend_phase2(adev);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, false);
+
return r;
}
@@ -2063,6 +1967,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_COMMON,
+ AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_IH,
};
@@ -2093,7 +1998,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_SMC,
- AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_DCE,
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
@@ -2335,7 +2239,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
- adev->vm_manager.vm_pte_num_rings = 0;
+ adev->vm_manager.vm_pte_num_rqs = 0;
adev->gmc.gmc_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
@@ -2367,6 +2271,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->gfx.gpu_clock_mutex);
mutex_init(&adev->srbm_mutex);
mutex_init(&adev->gfx.pipe_reserve_mutex);
+ mutex_init(&adev->gfx.gfx_off_mutex);
mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
@@ -2393,7 +2298,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&adev->late_init_work,
amdgpu_device_ip_late_init_func_handler);
+ INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
+ amdgpu_device_delay_enable_gfx_off);
+ adev->gfx.gfx_off_req_count = 1;
adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
/* Registers mapping */
@@ -2705,6 +2613,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
if (fbcon)
amdgpu_fbdev_set_suspend(adev, 1);
+ cancel_delayed_work_sync(&adev->late_init_work);
+
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
@@ -3041,71 +2951,22 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
}
/**
- * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers
- *
- * @adev: amdgpu_device pointer
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: amdgpu_bo buffer whose shadow is being restored
- * @fence: dma_fence associated with the operation
- *
- * Restores the VRAM buffer contents from the shadow in GTT. Used to
- * restore things like GPUVM page tables after a GPU reset where
- * the contents of VRAM might be lost.
- * Returns 0 on success, negative error code on failure.
- */
-static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct dma_fence **fence)
-{
- uint32_t domain;
- int r;
-
- if (!bo->shadow)
- return 0;
-
- r = amdgpu_bo_reserve(bo, true);
- if (r)
- return r;
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
- /* if bo has been evicted, then no need to recover */
- if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- r = amdgpu_bo_validate(bo->shadow);
- if (r) {
- DRM_ERROR("bo validate failed!\n");
- goto err;
- }
-
- r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
- NULL, fence, true);
- if (r) {
- DRM_ERROR("recover page table failed!\n");
- goto err;
- }
- }
-err:
- amdgpu_bo_unreserve(bo);
- return r;
-}
-
-/**
- * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents
+ * amdgpu_device_recover_vram - Recover some VRAM contents
*
* @adev: amdgpu_device pointer
*
* Restores the contents of VRAM buffers from the shadows in GTT. Used to
* restore things like GPUVM page tables after a GPU reset where
* the contents of VRAM might be lost.
- * Returns 0 on success, 1 on failure.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
*/
-static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
+static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- struct amdgpu_bo *bo, *tmp;
struct dma_fence *fence = NULL, *next = NULL;
- long r = 1;
- int i = 0;
- long tmo;
+ struct amdgpu_bo *shadow;
+ long r = 1, tmo;
if (amdgpu_sriov_runtime(adev))
tmo = msecs_to_jiffies(8000);
@@ -3114,44 +2975,40 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
DRM_INFO("recover vram bo from shadow start\n");
mutex_lock(&adev->shadow_list_lock);
- list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
- next = NULL;
- amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
+ list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
+
+ /* No need to recover an evicted BO */
+ if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
+ shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
+ continue;
+
+ r = amdgpu_bo_restore_shadow(shadow, &next);
+ if (r)
+ break;
+
if (fence) {
r = dma_fence_wait_timeout(fence, false, tmo);
- if (r == 0)
- pr_err("wait fence %p[%d] timeout\n", fence, i);
- else if (r < 0)
- pr_err("wait fence %p[%d] interrupted\n", fence, i);
- if (r < 1) {
- dma_fence_put(fence);
- fence = next;
+ dma_fence_put(fence);
+ fence = next;
+ if (r <= 0)
break;
- }
- i++;
+ } else {
+ fence = next;
}
-
- dma_fence_put(fence);
- fence = next;
}
mutex_unlock(&adev->shadow_list_lock);
- if (fence) {
- r = dma_fence_wait_timeout(fence, false, tmo);
- if (r == 0)
- pr_err("wait fence %p[%d] timeout\n", fence, i);
- else if (r < 0)
- pr_err("wait fence %p[%d] interrupted\n", fence, i);
-
- }
+ if (fence)
+ tmo = dma_fence_wait_timeout(fence, false, tmo);
dma_fence_put(fence);
- if (r > 0)
- DRM_INFO("recover vram bo from shadow done\n");
- else
+ if (r <= 0 || tmo <= 0) {
DRM_ERROR("recover vram bo from shadow failed\n");
+ return -EIO;
+ }
- return (r > 0) ? 0 : 1;
+ DRM_INFO("recover vram bo from shadow done\n");
+ return 0;
}
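[Editor's note] The r <= 0 and tmo <= 0 checks above lean on dma_fence_wait_timeout()'s return convention: a positive return is the remaining timeout in jiffies after the fence signaled, zero means the wait timed out, and a negative value is an error such as -ERESTARTSYS. A minimal sketch of consuming that convention:

	long r = dma_fence_wait_timeout(fence, false, msecs_to_jiffies(100));

	if (r > 0)
		;			/* signaled; r jiffies of budget left */
	else if (r == 0)
		return -ETIMEDOUT;	/* wait timed out */
	else
		return r;		/* interrupted or other error */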
/**
@@ -3225,8 +3082,8 @@ out:
}
}
- if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
- r = amdgpu_device_handle_vram_lost(adev);
+ if (!r)
+ r = amdgpu_device_recover_vram(adev);
return r;
}
@@ -3272,38 +3129,50 @@ error:
amdgpu_virt_release_full_gpu(adev, true);
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
atomic_inc(&adev->vram_lost_counter);
- r = amdgpu_device_handle_vram_lost(adev);
+ r = amdgpu_device_recover_vram(adev);
}
return r;
}
/**
+ * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
+ * a hung GPU.
+ */
+bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
+{
+ if (!amdgpu_device_ip_check_soft_reset(adev)) {
+ DRM_INFO("Timeout, but no hardware hang detected.\n");
+ return false;
+ }
+
+ if (amdgpu_gpu_recovery == 0 || (amdgpu_gpu_recovery == -1 &&
+ !amdgpu_sriov_vf(adev))) {
+ DRM_INFO("GPU recovery disabled.\n");
+ return false;
+ }
+
+ return true;
+}
+
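[Editor's note] This helper splits the "should we even try?" policy out of amdgpu_device_gpu_recover(), whose force parameter is dropped just below. Callers such as the job-timeout path presumably gate recovery on it; a hedged sketch (the function name, to_amdgpu_ring(), and the field layout here are assumptions, not code from this patch):

static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);

	if (amdgpu_device_should_recover_gpu(ring->adev))
		amdgpu_device_gpu_recover(ring->adev, job);
}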
+/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
* @adev: amdgpu device pointer
* @job: which job trigger hang
- * @force: forces reset regardless of amdgpu_gpu_recovery
*
* Attempt to reset the GPU if it has hung (all asics).
* Returns 0 for success or an error on failure.
*/
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
- struct amdgpu_job *job, bool force)
+ struct amdgpu_job *job)
{
int i, r, resched;
- if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
- DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
- return 0;
- }
-
- if (!force && (amdgpu_gpu_recovery == 0 ||
- (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
- DRM_INFO("GPU recovery disabled.\n");
- return 0;
- }
-
dev_info(adev->dev, "GPU reset begin!\n");
mutex_lock(&adev->lock_reset);