Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_gtt.c')
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_gtt.c | 146
1 file changed, 105 insertions, 41 deletions
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 941f8af016d6..084ea65d59c0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -7,10 +7,29 @@
#include <linux/fault-inject.h>
+#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"
+struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_create_lmem(vm->i915, sz, 0);
+ /*
+ * Ensure all paging structures for this vm share the same dma-resv
+ * object underneath, with the idea that one object_lock() will lock
+ * them all at once.
+ */
+ if (!IS_ERR(obj)) {
+ obj->base.resv = i915_vm_resv_get(vm);
+ obj->shares_resv_from = vm;
+ }
+
+ return obj;
+}
+
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
struct drm_i915_gem_object *obj;
@@ -19,33 +38,42 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
i915_gem_shrink_all(vm->i915);
obj = i915_gem_object_create_internal(vm->i915, sz);
- /* ensure all dma objects have the same reservation class */
- if (!IS_ERR(obj))
- obj->base.resv = &vm->resv;
+ /*
+ * Ensure all paging structures for this vm share the same dma-resv
+ * object underneath, with the idea that one object_lock() will lock
+ * them all at once.
+ */
+ if (!IS_ERR(obj)) {
+ obj->base.resv = i915_vm_resv_get(vm);
+ obj->shares_resv_from = vm;
+ }
+
return obj;
}
-int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
- int err;
+ enum i915_map_type type;
+ void *vaddr;
- i915_gem_object_lock(obj, NULL);
- err = i915_gem_object_pin_pages(obj);
- i915_gem_object_unlock(obj);
- if (err)
- return err;
+ type = i915_coherent_map_type(vm->i915, obj, true);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, type);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
}
-int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
- int err;
+ enum i915_map_type type;
+ void *vaddr;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
+ type = i915_coherent_map_type(vm->i915, obj, true);
+ vaddr = i915_gem_object_pin_map(obj, type);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
@@ -80,7 +108,7 @@ void __i915_vm_close(struct i915_address_space *vm)
int i915_vm_lock_objects(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww)
{
- if (vm->scratch[0]->base.resv == &vm->resv) {
+ if (vm->scratch[0]->base.resv == &vm->_resv) {
return i915_gem_object_lock(vm->scratch[0], ww);
} else {
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
@@ -96,6 +124,22 @@ void i915_address_space_fini(struct i915_address_space *vm)
mutex_destroy(&vm->mutex);
}
+/**
+ * i915_vm_resv_release - Final struct i915_address_space destructor
+ * @kref: Pointer to the &i915_address_space.resv_ref member.
+ *
+ * This function is called when the last lock sharer no longer shares the
+ * &i915_address_space._resv lock.
+ */
+void i915_vm_resv_release(struct kref *kref)
+{
+ struct i915_address_space *vm =
+ container_of(kref, typeof(*vm), resv_ref);
+
+ dma_resv_fini(&vm->_resv);
+ kfree(vm);
+}
+
static void __i915_vm_release(struct work_struct *work)
{
struct i915_address_space *vm =
@@ -103,9 +147,8 @@ static void __i915_vm_release(struct work_struct *work)
vm->cleanup(vm);
i915_address_space_fini(vm);
- dma_resv_fini(&vm->resv);
- kfree(vm);
+ i915_vm_resv_put(vm);
}
void i915_vm_release(struct kref *kref)
@@ -122,6 +165,14 @@ void i915_vm_release(struct kref *kref)
void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
kref_init(&vm->ref);
+
+ /*
+ * Special case for GGTT that has already done an early
+ * kref_init here.
+ */
+ if (!kref_read(&vm->resv_ref))
+ kref_init(&vm->resv_ref);
+
INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
atomic_set(&vm->open, 1);
@@ -132,8 +183,23 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
*/
mutex_init(&vm->mutex);
lockdep_set_subclass(&vm->mutex, subclass);
- i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
- dma_resv_init(&vm->resv);
+
+ if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
+ i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+ } else {
+ /*
+ * The CHV + BXT VTD workaround uses stop_machine(),
+ * which is allowed to allocate memory. This means &vm->mutex
+ * is the outer lock, and in theory we can allocate memory inside
+ * it through stop_machine().
+ *
+ * Add the lockdep annotation for this; the shrinker uses trylock.
+ */
+ mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
+ might_alloc(GFP_KERNEL);
+ mutex_release(&vm->mutex.dep_map, _THIS_IP_);
+ }
+ dma_resv_init(&vm->_resv);
GEM_BUG_ON(!vm->total);
drm_mm_init(&vm->mm, 0, vm->total);
@@ -155,6 +221,14 @@ void clear_pages(struct i915_vma *vma)
memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}
+void *__px_vaddr(struct drm_i915_gem_object *p)
+{
+ enum i915_map_type type;
+
+ GEM_BUG_ON(!i915_gem_object_has_pages(p));
+ return page_unpack_bits(p->mm.mapping, &type);
+}
+
dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
GEM_BUG_ON(!i915_gem_object_has_pages(p));
@@ -170,32 +244,22 @@ struct page *__px_page(struct drm_i915_gem_object *p)
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
- struct page *page = __px_page(p);
- void *vaddr;
+ void *vaddr = __px_vaddr(p);
- vaddr = kmap(page);
memset64(vaddr, val, count);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap(page);
}
static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
- struct sgt_iter sgt;
- struct page *page;
+ void *vaddr = __px_vaddr(scratch);
u8 val;
val = 0;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
val = POISON_FREE;
- for_each_sgt_page(page, sgt, scratch->mm.pages) {
- void *vaddr;
-
- vaddr = kmap(page);
- memset(vaddr, val, PAGE_SIZE);
- kunmap(page);
- }
+ memset(vaddr, val, scratch->base.size);
}
int setup_scratch_page(struct i915_address_space *vm)
@@ -225,7 +289,7 @@ int setup_scratch_page(struct i915_address_space *vm)
if (IS_ERR(obj))
goto skip;
- if (pin_pt_dma(vm, obj))
+ if (map_pt_dma(vm, obj))
goto skip_obj;
/* We need a single contiguous page for our scratch */
@@ -292,7 +356,7 @@ void gtt_write_workarounds(struct intel_gt *gt)
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
- else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
+ else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
@@ -309,13 +373,13 @@ void gtt_write_workarounds(struct intel_gt *gt)
* driver.
*/
if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
- INTEL_GEN(i915) <= 10)
+ GRAPHICS_VER(i915) <= 10)
intel_uncore_rmw(uncore,
GEN8_GAMW_ECO_DEV_RW_IA,
0,
GAMW_ECO_ENABLE_64K_IPS_FIELD);
- if (IS_GEN_RANGE(i915, 8, 11)) {
+ if (IS_GRAPHICS_VER(i915, 8, 11)) {
bool can_use_gtt_cache = true;
/*
@@ -397,7 +461,7 @@ static void bdw_setup_private_ppat(struct intel_uncore *uncore)
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
/* for scanout with eLLC */
- if (INTEL_GEN(i915) >= 9)
+ if (GRAPHICS_VER(i915) >= 9)
pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
else
pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
@@ -446,11 +510,11 @@ void setup_private_pat(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
- GEM_BUG_ON(INTEL_GEN(i915) < 8);
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 8);
- if (INTEL_GEN(i915) >= 12)
+ if (GRAPHICS_VER(i915) >= 12)
tgl_setup_private_ppat(uncore);
- else if (INTEL_GEN(i915) >= 10)
+ else if (GRAPHICS_VER(i915) >= 10)
cnl_setup_private_ppat(uncore);
else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
chv_setup_private_ppat(uncore);
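
For context, here is a minimal sketch (not part of the commit) of how a hypothetical caller might use the helpers reworked above: alloc_pt_dma() now hands out objects that share the vm's dma-resv, map_pt_dma() replaces pin_pt_dma() and creates a kernel mapping, and fill_page_dma() writes through __px_vaddr(). The function name example_alloc_and_fill_pt, the SZ_4K size, and the error-handling shape are illustrative assumptions only.

/*
 * Illustrative sketch only; assumes the declarations from intel_gtt.h
 * are visible and that "vm" is a live i915_address_space.
 */
static int example_alloc_and_fill_pt(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	int err;

	/* Object shares vm->_resv via i915_vm_resv_get() in alloc_pt_dma() */
	obj = alloc_pt_dma(vm, SZ_4K);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Pins the backing pages and creates a kernel mapping */
	err = map_pt_dma(vm, obj);
	if (err) {
		i915_gem_object_put(obj);
		return err;
	}

	/* Writes through the mapping returned by __px_vaddr() */
	fill_page_dma(obj, 0, SZ_4K / sizeof(u64));
	return 0;
}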