author		Maarten Lankhorst <m.b.lankhorst@gmail.com>	2012-11-28 15:25:43 +0400
committer	Dave Airlie <airlied@redhat.com>	2012-12-10 14:21:22 +0400
commit		e7ab20197be3ee5fd75441e1cff0c7cdfea5bf1a (patch)
tree		7c43fd066b4fd7f4aef8a4fb7b1d3c47d831c422 /drivers/gpu/drm/ttm
parent		2b7b3ad2fb8f904ae9ba7ca71323bc11c0978d91 (diff)
download	linux-e7ab20197be3ee5fd75441e1cff0c7cdfea5bf1a.tar.xz
drm/ttm: cope with reserved buffers on lru list in ttm_mem_evict_first, v2
Replace the goto loop with a simple for-each loop, and only run the delayed destroy cleanup if we can reserve the buffer first.

No race occurs, since the lru lock is never dropped any more. An empty list and a list full of unreservable buffers both cause -EBUSY to be returned, which is identical to the previous situation, because previously buffers on the lru list were always guaranteed to be reservable.

This should work since currently ttm guarantees that items on the lru are always reservable, and reserving items blockingly while another bo is held is enough to run into a deadlock. Currently this is not a concern, since removal from the lru list and reservation are always done atomically, but when this guarantee no longer holds, we have to handle this situation or end up with possible deadlocks.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
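For illustration only (not part of the commit): a minimal userspace sketch of the eviction-loop pattern described above. struct buffer, try_reserve() and evict_first() are hypothetical stand-ins for struct ttm_buffer_object, ttm_bo_reserve_locked() and ttm_mem_evict_first(); the real function additionally holds glob->lru_lock, takes a list_kref reference, and handles delayed-destroy cleanup, as the diff below shows.

/* Build with: cc -pthread sketch.c */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct buffer {
	pthread_mutex_t reserve;	/* stand-in for the bo reservation */
	int id;
};

/* Non-blocking try-reserve, modelling ttm_bo_reserve_locked(bo, false, true, false, 0). */
static int try_reserve(struct buffer *bo)
{
	return pthread_mutex_trylock(&bo->reserve) ? -EBUSY : 0;
}

/*
 * Walk the lru in order and pick the first buffer that can be reserved.
 * An empty list and a list full of already-reserved buffers both yield
 * no victim, which the caller maps to -EBUSY, matching the commit
 * message above.
 */
static struct buffer *evict_first(struct buffer *lru, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!try_reserve(&lru[i]))
			return &lru[i];		/* reserved: evict this one */
	return NULL;
}

int main(void)
{
	struct buffer lru[3] = {
		{ PTHREAD_MUTEX_INITIALIZER, 0 },
		{ PTHREAD_MUTEX_INITIALIZER, 1 },
		{ PTHREAD_MUTEX_INITIALIZER, 2 },
	};
	struct buffer *victim;

	/* Pretend buffer 0 is already reserved by another thread. */
	pthread_mutex_lock(&lru[0].reserve);

	victim = evict_first(lru, 3);
	if (victim)
		printf("evicting buffer %d\n", victim->id);	/* prints buffer 1 */
	else
		printf("-EBUSY: nothing on the lru is reservable\n");
	return 0;
}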
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	42
1 file changed, 11 insertions, 31 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9a479885bf59..6059771d506e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -811,49 +811,29 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_buffer_object *bo;
-	int ret, put_count = 0;
+	int ret = -EBUSY, put_count;
-retry:
 	spin_lock(&glob->lru_lock);
-	if (list_empty(&man->lru)) {
+	list_for_each_entry(bo, &man->lru, lru) {
+		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		if (!ret)
+			break;
+	}
+
+	if (ret) {
 		spin_unlock(&glob->lru_lock);
-		return -EBUSY;
+		return ret;
 	}
-	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 	if (!list_empty(&bo->ddestroy)) {
-		ret = ttm_bo_reserve_locked(bo, interruptible, no_wait_reserve, false, 0);
-		if (!ret)
-			ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
-							     no_wait_gpu);
-		else
-			spin_unlock(&glob->lru_lock);
-
+		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+						     no_wait_gpu);
 		kref_put(&bo->list_kref, ttm_bo_release_list);
-
 		return ret;
 	}
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
-	if (unlikely(ret == -EBUSY)) {
-		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait_reserve))
-			ret = ttm_bo_wait_unreserved(bo, interruptible);
-
-		kref_put(&bo->list_kref, ttm_bo_release_list);
-
-		/**
-		 * We *need* to retry after releasing the lru lock.
-		 */
-
-		if (unlikely(ret != 0))
-			return ret;
-		goto retry;
-	}
-
 	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);