Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
 drivers/gpu/drm/msm/msm_gem.c | 179
 1 file changed, 64 insertions(+), 115 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8ddbd2e001d4..1dee0d18abbb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -19,7 +19,7 @@
#include "msm_gpu.h"
#include "msm_mmu.h"
-static void update_inactive(struct msm_gem_object *msm_obj);
+static void update_lru(struct drm_gem_object *obj);
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
@@ -97,7 +97,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
@@ -132,7 +132,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (msm_obj->flags & MSM_BO_WC)
sync_for_device(msm_obj);
- update_inactive(msm_obj);
+ update_lru(obj);
}
return msm_obj->pages;
@@ -174,40 +174,45 @@ static void put_pages(struct drm_gem_object *obj)
put_pages_vram(obj);
msm_obj->pages = NULL;
+ update_lru(obj);
}
}
-struct page **msm_gem_get_pages(struct drm_gem_object *obj)
+static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **p;
- msm_gem_lock(obj);
+ msm_gem_assert_locked(obj);
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
- msm_gem_unlock(obj);
return ERR_PTR(-EBUSY);
}
p = get_pages(obj);
-
if (!IS_ERR(p)) {
- msm_obj->pin_count++;
- update_inactive(msm_obj);
+ to_msm_bo(obj)->pin_count++;
+ update_lru(obj);
}
- msm_gem_unlock(obj);
return p;
}
-void msm_gem_put_pages(struct drm_gem_object *obj)
+struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **p;
msm_gem_lock(obj);
- msm_obj->pin_count--;
- GEM_WARN_ON(msm_obj->pin_count < 0);
- update_inactive(msm_obj);
+ p = msm_gem_pin_pages_locked(obj);
+ msm_gem_unlock(obj);
+
+ return p;
+}
+
+void msm_gem_unpin_pages(struct drm_gem_object *obj)
+{
+ msm_gem_lock(obj);
+ msm_gem_unpin_locked(obj);
msm_gem_unlock(obj);
}
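
The refactor above splits msm_gem_get_pages()/msm_gem_put_pages() into a locked helper plus locking wrappers, so callers get a symmetric pin/unpin pair. A minimal usage sketch for a hypothetical caller (assuming both wrappers are exported via msm_gem.h; not part of this patch):

	/* Pin the backing pages across a CPU access, then drop the pin so
	 * update_lru() can move the object back to an evictable list.
	 */
	struct page **p = msm_gem_pin_pages(obj);	/* locks, pins, unlocks */
	if (IS_ERR(p))
		return PTR_ERR(p);
	/* ... access the pages ... */
	msm_gem_unpin_pages(obj);			/* locks, unpins, unlocks */
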
@@ -273,7 +278,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
int ret;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
@@ -302,7 +307,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma)
@@ -321,7 +326,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace == aspace)
@@ -352,7 +357,7 @@ put_iova_spaces(struct drm_gem_object *obj, bool close)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace) {
@@ -370,7 +375,7 @@ put_iova_vmas(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma, *tmp;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
del_vma(vma);
@@ -383,7 +388,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
{
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
vma = lookup_vma(obj, aspace);
@@ -423,19 +428,18 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
prot |= IOMMU_CACHE;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
return -EBUSY;
- pages = get_pages(obj);
+ pages = msm_gem_pin_pages_locked(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
-
- if (!ret)
- msm_obj->pin_count++;
+ if (ret)
+ msm_gem_unpin_locked(obj);
return ret;
}
@@ -444,12 +448,12 @@ void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
- update_inactive(msm_obj);
+ update_lru(obj);
}
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
@@ -465,7 +469,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
struct msm_gem_vma *vma;
int ret;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
vma = get_vma_locked(obj, aspace, range_start, range_end);
if (IS_ERR(vma))
@@ -626,7 +630,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (obj->import_attach)
return ERR_PTR(-ENODEV);
@@ -658,7 +662,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
goto fail;
}
- update_inactive(msm_obj);
+ update_lru(obj);
}
return msm_obj->vaddr;
@@ -699,7 +703,7 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
GEM_WARN_ON(msm_obj->vmap_count < 1);
msm_obj->vmap_count--;
@@ -729,8 +733,7 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
/* If the obj is inactive, we might need to move it
* between inactive lists
*/
- if (msm_obj->active_count == 0)
- update_inactive(msm_obj);
+ update_lru(obj);
msm_gem_unlock(obj);
@@ -742,7 +745,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */
@@ -757,7 +760,6 @@ void msm_gem_purge(struct drm_gem_object *obj)
put_iova_vmas(obj);
msm_obj->madv = __MSM_MADV_PURGED;
- update_inactive(msm_obj);
drm_gem_free_mmap_offset(obj);
@@ -780,10 +782,8 @@ void msm_gem_evict(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
GEM_WARN_ON(is_unevictable(msm_obj));
- GEM_WARN_ON(!msm_obj->evictable);
- GEM_WARN_ON(msm_obj->active_count);
/* Get rid of any iommu mapping(s): */
put_iova_spaces(obj, false);
@@ -791,15 +791,13 @@ void msm_gem_evict(struct drm_gem_object *obj)
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
put_pages(obj);
-
- update_inactive(msm_obj);
}
void msm_gem_vunmap(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
return;
@@ -808,66 +806,37 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
msm_obj->vaddr = NULL;
}
-void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
+static void update_lru(struct drm_gem_object *obj)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
-
- might_sleep();
- GEM_WARN_ON(!msm_gem_is_locked(obj));
- GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
- GEM_WARN_ON(msm_obj->dontneed);
-
- if (msm_obj->active_count++ == 0) {
- mutex_lock(&priv->mm_lock);
- if (msm_obj->evictable)
- mark_unevictable(msm_obj);
- list_move_tail(&msm_obj->mm_list, &gpu->active_list);
- mutex_unlock(&priv->mm_lock);
- }
-}
-
-void msm_gem_active_put(struct drm_gem_object *obj)
-{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- might_sleep();
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(&msm_obj->base);
- if (--msm_obj->active_count == 0) {
- update_inactive(msm_obj);
+ if (!msm_obj->pages) {
+ GEM_WARN_ON(msm_obj->pin_count);
+ GEM_WARN_ON(msm_obj->vmap_count);
+
+ drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
+ } else if (msm_obj->pin_count || msm_obj->vmap_count) {
+ drm_gem_lru_move_tail(&priv->lru.pinned, obj);
+ } else if (msm_obj->madv == MSM_MADV_WILLNEED) {
+ drm_gem_lru_move_tail(&priv->lru.willneed, obj);
+ } else {
+ GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
+
+ drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
}
}
-static void update_inactive(struct msm_gem_object *msm_obj)
+bool msm_gem_active(struct drm_gem_object *obj)
{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
+ msm_gem_assert_locked(obj);
- if (msm_obj->active_count != 0)
- return;
-
- mutex_lock(&priv->mm_lock);
-
- if (msm_obj->dontneed)
- mark_unpurgeable(msm_obj);
- if (msm_obj->evictable)
- mark_unevictable(msm_obj);
-
- list_del(&msm_obj->mm_list);
- if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
- list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
- mark_evictable(msm_obj);
- } else if (msm_obj->madv == MSM_MADV_DONTNEED) {
- list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
- mark_purgeable(msm_obj);
- } else {
- GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
- }
+ if (to_msm_bo(obj)->pin_count)
+ return true;
- mutex_unlock(&priv->mm_lock);
+ return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
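
update_lru() now buckets each object into exactly one of four drm_gem_lru lists: unbacked (no pages), pinned (pin_count or vmap_count held), willneed (evictable), or dontneed (purgeable). Likewise msm_gem_active() replaces the old active_count bookkeeping: an object counts as active if it is pinned or if any read/write fence on its reservation object is still unsignaled. The lists themselves are declared and initialized outside this file; a sketch of the assumed companion msm_drv.h/msm_drv.c changes from the same series (names inferred from the priv->lru.* references above):

	/* Assumed msm_drv.h addition: four LRUs sharing one lock. */
	struct {
		struct mutex lock;		/* protects all four lists */
		struct drm_gem_lru unbacked;	/* no backing pages */
		struct drm_gem_lru pinned;	/* pinned and/or vmap'd */
		struct drm_gem_lru willneed;	/* evictable */
		struct drm_gem_lru dontneed;	/* purgeable */
	} lru;

	/* ...and at driver init in msm_drv.c: */
	drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.pinned,   &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
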
@@ -910,7 +879,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
stats->all.count++;
stats->all.size += obj->size;
- if (is_active(msm_obj)) {
+ if (msm_gem_active(obj)) {
stats->active.count++;
stats->active.size += obj->size;
}
@@ -938,7 +907,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
}
seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
- msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
+ msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
@@ -1015,15 +984,6 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
- mutex_lock(&priv->mm_lock);
- if (msm_obj->dontneed)
- mark_unpurgeable(msm_obj);
- list_del(&msm_obj->mm_list);
- mutex_unlock(&priv->mm_lock);
-
- /* object should not be on active list: */
- GEM_WARN_ON(is_active(msm_obj));
-
put_iova_spaces(obj, true);
if (obj->import_attach) {
@@ -1183,13 +1143,6 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
to_msm_bo(obj)->vram_node = &vma->node;
- /* Call chain get_pages() -> update_inactive() tries to
- * access msm_obj->mm_list, but it is not initialized yet.
- * To avoid NULL pointer dereference error, initialize
- * mm_list to be empty.
- */
- INIT_LIST_HEAD(&msm_obj->mm_list);
-
msm_gem_lock(obj);
pages = get_pages(obj);
msm_gem_unlock(obj);
@@ -1212,9 +1165,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
- mutex_lock(&priv->mm_lock);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
- mutex_unlock(&priv->mm_lock);
+ drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);
@@ -1270,9 +1221,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
msm_gem_unlock(obj);
- mutex_lock(&priv->mm_lock);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
- mutex_unlock(&priv->mm_lock);
+ drm_gem_lru_move_tail(&priv->lru.pinned, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);
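
The payoff of the conversion is on the shrinker side (msm_gem_shrinker.c, not shown in this diff): instead of walking driver-private inactive lists under mm_lock, it can call the common drm_gem_lru_scan() helper on each list in order of reclaim cost. A rough sketch under the helper's original signature (lru, scan budget, per-object callback; later kernels add a 'remaining' out-parameter, and 'purge'/'evict' stand in for the driver's callbacks):

	/* Purge DONTNEED objects first; if that doesn't satisfy the
	 * budget and swap is available, evict WILLNEED objects too.
	 */
	freed = drm_gem_lru_scan(&priv->lru.dontneed, nr_to_scan, purge);
	if (freed < nr_to_scan)
		freed += drm_gem_lru_scan(&priv->lru.willneed,
					  nr_to_scan - freed, evict);
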