Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_ttm.c')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c  84
1 file changed, 38 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 6f3ab7ade41a..e3fc38dd5db0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -361,7 +361,6 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- struct ttm_resource *res = bo->resource;
if (!obj)
return false;
@@ -378,45 +377,7 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
if (!i915_gem_object_evictable(obj))
return false;
- switch (res->mem_type) {
- case I915_PL_LMEM0: {
- struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, res->mem_type);
- struct i915_ttm_buddy_resource *bman_res =
- to_ttm_buddy_resource(res);
- struct drm_buddy *mm = bman_res->mm;
- struct drm_buddy_block *block;
-
- if (!place->fpfn && !place->lpfn)
- return true;
-
- GEM_BUG_ON(!place->lpfn);
-
- /*
- * If we just want something mappable then we can quickly check
- * if the current victim resource is using any of the CPU
- * visible portion.
- */
- if (!place->fpfn &&
- place->lpfn == i915_ttm_buddy_man_visible_size(man))
- return bman_res->used_visible_size > 0;
-
- /* Real range allocation */
- list_for_each_entry(block, &bman_res->blocks, link) {
- unsigned long fpfn =
- drm_buddy_block_offset(block) >> PAGE_SHIFT;
- unsigned long lpfn = fpfn +
- (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
-
- if (place->fpfn < lpfn && place->lpfn > fpfn)
- return true;
- }
- return false;
- } default:
- break;
- }
-
- return true;
+ return ttm_bo_eviction_valuable(bo, place);
}
static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
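The hunk above drops the i915-specific LMEM walk and defers entirely to the generic ttm_bo_eviction_valuable() helper. What the removed per-block loop computed was a plain page-frame range overlap between the victim's buddy blocks and the requested placement; a minimal sketch of that predicate (hypothetical helper name, shown only to illustrate the check the generic TTM path is assumed to take over) looks like:

/* Hypothetical helper mirroring the removed per-block test: a victim block
 * [fpfn, lpfn) matters only if it intersects the requested placement range. */
static bool place_overlaps_pfn_range(const struct ttm_place *place,
                                     unsigned long fpfn, unsigned long lpfn)
{
        return place->fpfn < lpfn && place->lpfn > fpfn;
}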
@@ -548,9 +509,18 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ intel_wakeref_t wakeref = 0;
+
+ if (bo->resource && likely(obj)) {
+ /* ttm_bo_release() already has dma_resv_lock */
+ if (i915_ttm_cpu_maps_iomem(bo->resource))
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
- if (likely(obj)) {
__i915_gem_object_pages_fini(obj);
+
+ if (wakeref)
+ intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
+
i915_ttm_free_cached_io_rsgt(obj);
}
}
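In i915_ttm_delete_mem_notify() above, the teardown is now bracketed by a runtime-PM wakeref whenever the backing resource is CPU-mapped I/O memory, since unmapping LMEM needs the device awake. A minimal sketch of that bracket, assuming a struct drm_i915_private *i915 already in scope (not the exact function body):

        intel_wakeref_t wakeref = 0;

        /* Only wake the device when teardown will touch CPU-visible iomem. */
        if (i915_ttm_cpu_maps_iomem(bo->resource))
                wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        __i915_gem_object_pages_fini(obj);      /* may unmap LMEM */

        if (wakeref)
                intel_runtime_pm_put(&i915->runtime_pm, wakeref);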
@@ -1020,6 +990,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct ttm_buffer_object *bo = area->vm_private_data;
struct drm_device *dev = bo->base.dev;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref = 0;
vm_fault_t ret;
int idx;
@@ -1041,6 +1012,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ if (i915_ttm_cpu_maps_iomem(bo->resource))
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
if (!i915_ttm_resource_mappable(bo->resource)) {
int err = -ENODEV;
int i;
@@ -1062,7 +1036,8 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
if (err) {
drm_dbg(dev, "Unable to make resource CPU accessible\n");
dma_resv_unlock(bo->base.resv);
- return VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
+ goto out_rpm;
}
}
@@ -1073,12 +1048,30 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
+
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
+ goto out_rpm;
+
+ /* ttm_bo_vm_reserve() already has dma_resv_lock */
+ if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
+ obj->userfault_count = 1;
+ mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
+ mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ }
+
+ if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+ intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref,
+ msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
i915_ttm_adjust_lru(obj);
dma_resv_unlock(bo->base.resv);
+
+out_rpm:
+ if (wakeref)
+ intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
+
return ret;
}
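The vm_fault_ttm() hunks take the same kind of wakeref while faulting in CPU-visible LMEM and, on a successful fault, record the object on a per-GT userfault list so its mappings can be revoked across runtime suspend. A condensed sketch of that bookkeeping, using the field and lock names from the hunk and assuming the dma_resv lock is already held by ttm_bo_vm_reserve():

        /* Register a freshly faulted, iomem-backed mapping exactly once. */
        if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
                struct intel_gt *gt = to_gt(to_i915(obj->base.dev));

                obj->userfault_count = 1;
                mutex_lock(&gt->lmem_userfault_lock);
                list_add(&obj->userfault_link, &gt->lmem_userfault_list);
                mutex_unlock(&gt->lmem_userfault_lock);
        }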
@@ -1242,9 +1235,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
* Similarly, in delayed_destroy, we can't call ttm_bo_put()
* until successful initialization.
*/
- ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
- bo_type, &i915_sys_placement,
- page_size >> PAGE_SHIFT,
+ ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
+ &i915_sys_placement, page_size >> PAGE_SHIFT,
&ctx, NULL, NULL, i915_ttm_bo_destroy);
if (ret)
return i915_ttm_err_to_gem(ret);
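The final hunk tracks a TTM API change: ttm_bo_init_reserved() no longer takes an explicit size argument, so the buffer size must already be recorded on the embedded GEM object before the BO is handed to TTM. A rough sketch of the expected call pattern, with the surrounding helpers shown as an assumption rather than the exact i915 code path:

        /* Size travels with the GEM object, set before TTM ever sees the BO. */
        drm_gem_private_object_init(&i915->drm, &obj->base, size);

        ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
                                   &i915_sys_placement, page_size >> PAGE_SHIFT,
                                   &ctx, NULL, NULL, i915_ttm_bo_destroy);
        if (ret)
                return i915_ttm_err_to_gem(ret);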