Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--  drivers/gpu/drm/drm_gem.c  170
1 file changed, 170 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ad068865ba20..8b68a3c1e6ab 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -165,6 +165,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
obj->resv = &obj->_resv;
drm_vma_node_reset(&obj->vma_node);
+ INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -936,6 +937,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
dma_resv_fini(&obj->_resv);
drm_gem_free_mmap_offset(obj);
+ drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
@@ -1259,3 +1261,171 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
+
+/**
+ * drm_gem_lru_init - initialize an LRU
+ *
+ * @lru: The LRU to initialize
+ * @lock: The lock protecting the LRU
+ */
+void
+drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
+{
+ lru->lock = lock;
+ lru->count = 0;
+ INIT_LIST_HEAD(&lru->list);
+}
+EXPORT_SYMBOL(drm_gem_lru_init);
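
For illustration, a driver embeds these LRUs next to a lock it already owns. Several LRUs may share one lock, which is what allows drm_gem_lru_move_tail_locked() below to unlink an object from its old LRU while holding the new LRU's lock. A minimal sketch, using hypothetical foo_* names that are not part of this patch:

    #include <drm/drm_device.h>
    #include <drm/drm_gem.h>
    #include <linux/mutex.h>
    #include <linux/shrinker.h>

    struct foo_device {
            struct drm_device base;
            struct shrinker shrinker;       /* registered elsewhere */
            struct mutex lru_lock;          /* one lock protecting both LRUs */
            struct drm_gem_lru pinned;      /* pages must not be reclaimed */
            struct drm_gem_lru inactive;    /* reclaim candidates, oldest at head */
    };

    static void foo_device_lru_init(struct foo_device *foo)
    {
            mutex_init(&foo->lru_lock);
            drm_gem_lru_init(&foo->pinned, &foo->lru_lock);
            drm_gem_lru_init(&foo->inactive, &foo->lru_lock);
    }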
+
+static void
+drm_gem_lru_remove_locked(struct drm_gem_object *obj)
+{
+ obj->lru->count -= obj->size >> PAGE_SHIFT;
+ WARN_ON(obj->lru->count < 0);
+ list_del(&obj->lru_node);
+ obj->lru = NULL;
+}
+
+/**
+ * drm_gem_lru_remove - remove object from whatever LRU it is in
+ *
+ * If the object is currently in any LRU, remove it.
+ *
+ * @obj: The GEM object to remove from current LRU
+ */
+void
+drm_gem_lru_remove(struct drm_gem_object *obj)
+{
+ struct drm_gem_lru *lru = obj->lru;
+
+ if (!lru)
+ return;
+
+ mutex_lock(lru->lock);
+ drm_gem_lru_remove_locked(obj);
+ mutex_unlock(lru->lock);
+}
+EXPORT_SYMBOL(drm_gem_lru_remove);
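
The early return when obj->lru is NULL makes this safe to call unconditionally. Most drivers never call it on teardown, since the drm_gem_object_release() hunk above already does; a direct call is for exempting a still-live object from reclaim. A hypothetical example:

    /* Hypothetical: make obj invisible to drm_gem_lru_scan() until it is
     * re-added with drm_gem_lru_move_tail().
     */
    static void foo_gem_unevictable(struct drm_gem_object *obj)
    {
            drm_gem_lru_remove(obj);
    }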
+
+static void
+drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
+{
+ lockdep_assert_held_once(lru->lock);
+
+ if (obj->lru)
+ drm_gem_lru_remove_locked(obj);
+
+ lru->count += obj->size >> PAGE_SHIFT;
+ list_add_tail(&obj->lru_node, &lru->list);
+ obj->lru = lru;
+}
+
+/**
+ * drm_gem_lru_move_tail - move the object to the tail of the LRU
+ *
+ * If the object is already in this LRU it will be moved to the
+ * tail. Otherwise it will be removed from whichever other LRU
+ * it is in (if any) and moved into this LRU.
+ *
+ * @lru: The LRU to move the object into.
+ * @obj: The GEM object to move into this LRU
+ */
+void
+drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
+{
+ mutex_lock(lru->lock);
+ drm_gem_lru_move_tail_locked(lru, obj);
+ mutex_unlock(lru->lock);
+}
+EXPORT_SYMBOL(drm_gem_lru_move_tail);
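
Continuing the hypothetical foo_* sketch, a pin/unpin path is the natural caller. Because both LRUs share foo->lru_lock, each move is a single lock acquisition:

    static void foo_gem_pin(struct foo_device *foo, struct drm_gem_object *obj)
    {
            /* Unlinked from "inactive" (if it was there), appended to "pinned". */
            drm_gem_lru_move_tail(&foo->pinned, obj);
    }

    static void foo_gem_unpin(struct foo_device *foo, struct drm_gem_object *obj)
    {
            /* Reclaimable again; tail insertion means older buffers are
             * scanned first.
             */
            drm_gem_lru_move_tail(&foo->inactive, obj);
    }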
+
+/**
+ * drm_gem_lru_scan - helper to implement shrinker.scan_objects
+ *
+ * If the shrink callback succeeds, it is expected that the driver
+ * move the object out of this LRU.
+ *
+ * If the LRU may contain active buffers, it is the responsibility of
+ * the shrink callback to check for this (i.e. dma_resv_test_signaled())
+ * or, if necessary, to block until the buffer becomes idle.
+ *
+ * @lru: The LRU to scan
+ * @nr_to_scan: The number of pages to try to reclaim
+ * @shrink: Callback to try to shrink/reclaim the object.
+ */
+unsigned long
+drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+ bool (*shrink)(struct drm_gem_object *obj))
+{
+ struct drm_gem_lru still_in_lru;
+ struct drm_gem_object *obj;
+ unsigned freed = 0;
+
+ drm_gem_lru_init(&still_in_lru, lru->lock);
+
+ mutex_lock(lru->lock);
+
+ while (freed < nr_to_scan) {
+ obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);
+
+ if (!obj)
+ break;
+
+ drm_gem_lru_move_tail_locked(&still_in_lru, obj);
+
+ /*
+ * If it's in the process of being freed, gem_object->free()
+ * may be blocked on the lock waiting to remove it, so just
+ * skip it.
+ */
+ if (!kref_get_unless_zero(&obj->refcount))
+ continue;
+
+ /*
+ * Now that we own a reference, we can drop the lock for the
+ * rest of the loop body, to reduce contention with other
+ * code paths that need the LRU lock
+ */
+ mutex_unlock(lru->lock);
+
+ /*
+ * Note that this still needs to be a trylock, since we can
+ * hit the shrinker in response to trying to get backing pages
+ * for this obj (i.e. while its lock is already held).
+ */
+ if (!dma_resv_trylock(obj->resv))
+ goto tail;
+
+ if (shrink(obj)) {
+ freed += obj->size >> PAGE_SHIFT;
+
+ /*
+ * If we succeeded in releasing the object's backing
+ * pages, we expect the driver to have moved the object
+ * out of this LRU
+ */
+ WARN_ON(obj->lru == &still_in_lru);
+ WARN_ON(obj->lru == lru);
+ }
+
+ dma_resv_unlock(obj->resv);
+
+tail:
+ drm_gem_object_put(obj);
+ mutex_lock(lru->lock);
+ }
+
+ /*
+ * Move objects we've skipped over out of the temporary still_in_lru
+ * back into this LRU
+ */
+ list_for_each_entry(obj, &still_in_lru.list, lru_node)
+ obj->lru = lru;
+ list_splice_tail(&still_in_lru.list, &lru->list);
+ lru->count += still_in_lru.count;
+
+ mutex_unlock(lru->lock);
+
+ return freed;
+}
+EXPORT_SYMBOL(drm_gem_lru_scan);
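
Putting it together, drm_gem_lru_scan() slots straight into a shrinker's count/scan pair. The sketch below keeps the foo_* assumptions; foo_gem_shrink() and foo_gem_purge() are hypothetical, and per the doc-comment above a real callback must verify idleness and move the object out of the scanned LRU on success:

    #include <linux/dma-resv.h>

    /* lru->count is maintained in pages by the helpers above, so it can be
     * reported to the shrinker core directly.
     */
    static unsigned long
    foo_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
    {
            struct foo_device *foo =
                    container_of(shrinker, struct foo_device, shrinker);

            return foo->inactive.count;
    }

    /* Called by drm_gem_lru_scan() with obj->resv already trylocked and a
     * reference held.
     */
    static bool foo_gem_shrink(struct drm_gem_object *obj)
    {
            if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true)))
                    return false;   /* still active, leave it skipped */

            foo_gem_purge(obj);     /* hypothetical: drop pages + LRU removal */
            return true;
    }

    static unsigned long
    foo_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
    {
            struct foo_device *foo =
                    container_of(shrinker, struct foo_device, shrinker);
            unsigned long freed;

            freed = drm_gem_lru_scan(&foo->inactive, sc->nr_to_scan,
                                     foo_gem_shrink);

            return freed ?: SHRINK_STOP;
    }

Returning SHRINK_STOP when nothing could be freed tells the shrinker core to stop scanning this cache rather than retrying.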