author		Lang Yu <Lang.Yu@amd.com>	2021-06-22 19:23:35 +0300
committer	Andrey Grodzovsky <andrey.grodzovsky@amd.com>	2021-06-23 21:59:39 +0300
commit		3e640f1bb893610378858c2e47c99dc019dfc9e9 (patch)
tree		aebad1f502476222b24b20eb3cf3dbc8fd6229b4 /drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
parent		ae1bef72c20f9231898e2f5595751a2635d49db8 (diff)
download	linux-3e640f1bb893610378858c2e47c99dc019dfc9e9.tar.xz
drm/amdgpu: use temporary GTT as bounce buffer
Currently, we have a limited GTT memory size and need a bounce buffer when migrating a buffer between the VRAM and SYSTEM domains. The problem is that under GTT memory pressure we can't migrate between VRAM and SYSTEM at all, but in some cases we really need to, especially when validating a BO with a VRAM backing store that currently resides in the SYSTEM domain.

v2: still account temporary GTT allocations
v3 (chk): revert to the simpler change for now

Signed-off-by: Lang Yu <Lang.Yu@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Acked-by: Nirmoy Das <nirmoy.das@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210622162339.761651-2-andrey.grodzovsky@amd.com
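For context, here is a minimal, illustrative sketch of the caller side, which this page's diffstat (limited to amdgpu_gtt_mgr.c) does not show: the assumption is that the migration path in amdgpu_ttm.c flags its intermediate GTT hop with TTM_PL_FLAG_TEMPORARY when asking TTM to bounce a VRAM <-> SYSTEM move, so the allocation in amdgpu_gtt_mgr_new() below skips the mgr->available check. The function name and exact field assignments are assumptions for illustration, not the literal companion patch.

/*
 * Illustrative sketch only: when a VRAM <-> SYSTEM move needs to bounce
 * through GTT, the temporary hop placement is flagged so the GTT manager
 * skips its size accounting for it.
 */
static int request_temporary_gtt_hop(struct ttm_place *hop)
{
	hop->fpfn = 0;				/* no placement range restriction */
	hop->lpfn = 0;
	hop->mem_type = TTM_PL_TT;		/* bounce through GTT */
	hop->flags = TTM_PL_FLAG_TEMPORARY;	/* skip mgr->available accounting */
	return -EMULTIHOP;			/* ask TTM to perform the hop */
}

Keeping the flag on the individual placement, rather than using a global override, means ordinary GTT allocations still honor the configured GTT size limit.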
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index ec96e0b26b11..3b4520856162 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -132,14 +132,15 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 	struct amdgpu_gtt_node *node;
 	int r;
 
-	spin_lock(&mgr->lock);
-	if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
-	    atomic64_read(&mgr->available) < num_pages) {
+	if (!(place->flags & TTM_PL_FLAG_TEMPORARY)) {
+		spin_lock(&mgr->lock);
+		if (atomic64_read(&mgr->available) < num_pages) {
+			spin_unlock(&mgr->lock);
+			return -ENOSPC;
+		}
+		atomic64_sub(num_pages, &mgr->available);
 		spin_unlock(&mgr->lock);
-		return -ENOSPC;
 	}
-	atomic64_sub(num_pages, &mgr->available);
-	spin_unlock(&mgr->lock);
 
 	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
 	if (!node) {
@@ -175,7 +176,8 @@ err_free:
 	kfree(node);
 
 err_out:
-	atomic64_add(num_pages, &mgr->available);
+	if (!(place->flags & TTM_PL_FLAG_TEMPORARY))
+		atomic64_add(num_pages, &mgr->available);
 
 	return r;
 }
@@ -198,7 +200,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
 		drm_mm_remove_node(&node->base.mm_nodes[0]);
 	spin_unlock(&mgr->lock);
-	atomic64_add(res->num_pages, &mgr->available);
+
+	if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
+		atomic64_add(res->num_pages, &mgr->available);
 
 	kfree(node);
 }
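Note the asymmetry between the two paths: amdgpu_gtt_mgr_new() tests place->flags, while amdgpu_gtt_mgr_del() tests res->placement. A brief hedged sketch of why that works, assuming the ttm_resource_init() behavior of this era: TTM copies the allocation flags onto the resource when it is initialized, so a TEMPORARY allocation is also freed without touching mgr->available and the counter stays balanced.

/*
 * Sketch only, paraphrasing ttm_resource_init() from
 * drivers/gpu/drm/ttm/ttm_resource.c as of this series; the helper
 * name is hypothetical.
 */
static void example_record_placement_flags(struct ttm_resource *res,
					   const struct ttm_place *place)
{
	res->placement = place->flags;	/* TTM_PL_FLAG_TEMPORARY survives until free */
}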