Diffstat (limited to 'drivers/gpu/drm/i915/gt/gen8_ppgtt.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c  181
1 file changed, 83 insertions(+), 98 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 699125928272..eb64f474a78c 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -181,7 +181,7 @@ static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
} while (pde++, --count);
}
- free_px(vm, pd);
+ free_px(vm, &pd->pt, lvl);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -199,7 +199,7 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
struct i915_page_directory * const pd,
u64 start, const u64 end, int lvl)
{
- const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+ const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
unsigned int idx, len;
GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
@@ -239,7 +239,7 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
vaddr = kmap_atomic_px(pt);
memset64(vaddr + gen8_pd_index(start, 0),
- vm->scratch[0].encode,
+ vm->scratch[0]->encode,
count);
kunmap_atomic(vaddr);
@@ -248,7 +248,7 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
}
if (release_pd_entry(pd, idx, pt, scratch))
- free_px(vm, pt);
+ free_px(vm, pt, lvl);
} while (idx++, --len);
return start;
@@ -269,14 +269,12 @@ static void gen8_ppgtt_clear(struct i915_address_space *vm,
start, start + length, vm->top);
}
-static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
- struct i915_page_directory * const pd,
- u64 * const start, const u64 end, int lvl)
+static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_page_directory * const pd,
+ u64 * const start, const u64 end, int lvl)
{
- const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
- struct i915_page_table *alloc = NULL;
unsigned int idx, len;
- int ret = 0;
GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
@@ -297,49 +295,31 @@ static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
__func__, vm, lvl + 1, idx);
- pt = fetch_and_zero(&alloc);
- if (lvl) {
- if (!pt) {
- pt = &alloc_pd(vm)->pt;
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto out;
- }
- }
-
- fill_px(pt, vm->scratch[lvl].encode);
- } else {
- if (!pt) {
- pt = alloc_pt(vm);
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto out;
- }
- }
-
- if (intel_vgpu_active(vm->i915) ||
- gen8_pt_count(*start, end) < I915_PDES)
- fill_px(pt, vm->scratch[lvl].encode);
- }
+ pt = stash->pt[!!lvl];
+ __i915_gem_object_pin_pages(pt->base);
+ i915_gem_object_make_unshrinkable(pt->base);
+
+ if (lvl ||
+ gen8_pt_count(*start, end) < I915_PDES ||
+ intel_vgpu_active(vm->i915))
+ fill_px(pt, vm->scratch[lvl]->encode);
spin_lock(&pd->lock);
- if (likely(!pd->entry[idx]))
+ if (likely(!pd->entry[idx])) {
+ stash->pt[!!lvl] = pt->stash;
+ atomic_set(&pt->used, 0);
set_pd_entry(pd, idx, pt);
- else
- alloc = pt, pt = pd->entry[idx];
+ } else {
+ pt = pd->entry[idx];
+ }
}
if (lvl) {
atomic_inc(&pt->used);
spin_unlock(&pd->lock);
- ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
- start, end, lvl);
- if (unlikely(ret)) {
- if (release_pd_entry(pd, idx, pt, scratch))
- free_px(vm, pt);
- goto out;
- }
+ __gen8_ppgtt_alloc(vm, stash,
+ as_pd(pt), start, end, lvl);
spin_lock(&pd->lock);
atomic_dec(&pt->used);
@@ -359,18 +339,12 @@ static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
}
} while (idx++, --len);
spin_unlock(&pd->lock);
-out:
- if (alloc)
- free_px(vm, alloc);
- return ret;
}
-static int gen8_ppgtt_alloc(struct i915_address_space *vm,
- u64 start, u64 length)
+static void gen8_ppgtt_alloc(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ u64 start, u64 length)
{
- u64 from;
- int err;
-
GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
GEM_BUG_ON(range_overflows(start, length, vm->total));
@@ -378,25 +352,9 @@ static int gen8_ppgtt_alloc(struct i915_address_space *vm,
start >>= GEN8_PTE_SHIFT;
length >>= GEN8_PTE_SHIFT;
GEM_BUG_ON(length == 0);
- from = start;
- err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
- &start, start + length, vm->top);
- if (unlikely(err && from != start))
- __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
- from, start, vm->top);
-
- return err;
-}
-
-static __always_inline void
-write_pte(gen8_pte_t *pte, const gen8_pte_t val)
-{
- /* Magic delays? Or can we refine these to flush all in one pass? */
- *pte = val;
- wmb(); /* cpu to cache */
- clflush(pte); /* cache to memory */
- wmb(); /* visible to all */
+ __gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
+ &start, start + length, vm->top);
}
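
Note: after this hunk the allocation path can no longer fail, because every page table it consumes comes from a stash the caller filled and pinned beforehand. Below is a minimal caller-side sketch of that pattern; it assumes the i915_vm_pt_stash helpers added alongside this change (i915_vm_alloc_pt_stash, i915_vm_pin_pt_stash, i915_vm_free_pt_stash) and the reworked allocate_va_range vfunc keep the shapes shown, and the function name and locals are illustrative only.

/*
 * Sketch only: how a caller is expected to drive the stash-based
 * allocation. Helper names and signatures are assumed from the
 * surrounding series and may differ in detail.
 */
static int example_alloc_va(struct i915_address_space *vm,
			    u64 start, u64 size)
{
	struct i915_vm_pt_stash stash = {};
	int err;

	/* Preallocate every page table the range could possibly need. */
	err = i915_vm_alloc_pt_stash(vm, &stash, size);
	if (err)
		return err;

	/* Pin the backing pages while failure is still recoverable. */
	err = i915_vm_pin_pt_stash(vm, &stash);
	if (!err)
		vm->allocate_va_range(vm, &stash, start, size); /* cannot fail */

	/* Any preallocated page tables left unused are released here. */
	i915_vm_free_pt_stash(vm, &stash);
	return err;
}
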
static __always_inline u64
@@ -415,8 +373,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
- write_pte(&vaddr[gen8_pd_index(idx, 0)],
- pte_encode | iter->dma);
+ vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
iter->dma += I915_GTT_PAGE_SIZE;
if (iter->dma >= iter->max) {
@@ -439,10 +396,12 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
pd = pdp->entry[gen8_pd_index(idx, 2)];
}
+ clflush_cache_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
}
} while (1);
+ clflush_cache_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
return idx;
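
Note: with write_pte() removed, PTEs are stored with plain writes and the whole kmap'd page-table page is flushed once via clflush_cache_range() before it is unmapped, rather than issuing a wmb()+clflush() pair per entry. A rough sketch of the resulting pattern, assuming a contiguous run of entries in one level-0 page table; the helper name and loop body are illustrative only.

/*
 * Sketch only: the flushing pattern this hunk moves to. Entries are
 * written as ordinary stores and the mapped page is flushed once
 * before kunmap, instead of flushing after every PTE store.
 */
static void example_fill_pt(gen8_pte_t *vaddr, u64 pte_encode,
			    dma_addr_t dma, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		vaddr[i] = pte_encode | dma;	/* plain store, no per-PTE flush */
		dma += I915_GTT_PAGE_SIZE;
	}

	/* One cache flush for the whole page-table page. */
	clflush_cache_range(vaddr, PAGE_SIZE);
}
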
@@ -498,7 +457,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
do {
GEM_BUG_ON(iter->sg->length < page_size);
- write_pte(&vaddr[index++], encode | iter->dma);
+ vaddr[index++] = encode | iter->dma;
start += page_size;
iter->dma += page_size;
@@ -523,6 +482,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
}
} while (rem >= page_size && index < I915_PDES);
+ clflush_cache_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
/*
@@ -554,7 +514,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
u16 i;
- encode = vma->vm->scratch[0].encode;
+ encode = vma->vm->scratch[0]->encode;
vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
for (i = 1; i < index; i += 16)
@@ -608,27 +568,37 @@ static int gen8_init_scratch(struct i915_address_space *vm)
GEM_BUG_ON(!clone->has_read_only);
vm->scratch_order = clone->scratch_order;
- memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
- px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
+ for (i = 0; i <= vm->top; i++)
+ vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);
+
return 0;
}
- ret = setup_scratch_page(vm, __GFP_HIGHMEM);
+ ret = setup_scratch_page(vm);
if (ret)
return ret;
- vm->scratch[0].encode =
- gen8_pte_encode(px_dma(&vm->scratch[0]),
+ vm->scratch[0]->encode =
+ gen8_pte_encode(px_dma(vm->scratch[0]),
I915_CACHE_LLC, vm->has_read_only);
for (i = 1; i <= vm->top; i++) {
- if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
+ struct drm_i915_gem_object *obj;
+
+ obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+ if (IS_ERR(obj))
+ goto free_scratch;
+
+ ret = pin_pt_dma(vm, obj);
+ if (ret) {
+ i915_gem_object_put(obj);
goto free_scratch;
+ }
- fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
- vm->scratch[i].encode =
- gen8_pde_encode(px_dma(&vm->scratch[i]),
- I915_CACHE_LLC);
+ fill_px(obj, vm->scratch[i - 1]->encode);
+ obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC);
+
+ vm->scratch[i] = obj;
}
return 0;
@@ -649,12 +619,20 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
struct i915_page_directory *pde;
+ int err;
pde = alloc_pd(vm);
if (IS_ERR(pde))
return PTR_ERR(pde);
- fill_px(pde, vm->scratch[1].encode);
+ err = pin_pt_dma(vm, pde->pt.base);
+ if (err) {
+ i915_gem_object_put(pde->pt.base);
+ free_pd(vm, pde);
+ return err;
+ }
+
+ fill_px(pde, vm->scratch[1]->encode);
set_pd_entry(pd, idx, pde);
atomic_inc(px_used(pde)); /* keep pinned */
}
@@ -668,21 +646,32 @@ gen8_alloc_top_pd(struct i915_address_space *vm)
{
const unsigned int count = gen8_pd_top_count(vm);
struct i915_page_directory *pd;
+ int err;
- GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
+ GEM_BUG_ON(count > I915_PDES);
- pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
+ pd = __alloc_pd(count);
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
- if (unlikely(setup_page_dma(vm, px_base(pd)))) {
- kfree(pd);
- return ERR_PTR(-ENOMEM);
+ pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+ if (IS_ERR(pd->pt.base)) {
+ err = PTR_ERR(pd->pt.base);
+ pd->pt.base = NULL;
+ goto err_pd;
}
- fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
+ err = pin_pt_dma(vm, pd->pt.base);
+ if (err)
+ goto err_pd;
+
+ fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
atomic_inc(px_used(pd)); /* mark as pinned */
return pd;
+
+err_pd:
+ free_pd(vm, pd);
+ return ERR_PTR(err);
}
/*
@@ -703,6 +692,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
ppgtt_init(ppgtt, gt);
ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
+ ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));
/*
* From bdw, there is hw support for read-only pages in the PPGTT.
@@ -714,12 +704,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
*/
ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
- /*
- * There are only few exceptions for gen >=6. chv and bxt.
- * And we are not sure about the latter so play safe for now.
- */
- if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
- ppgtt->vm.pt_kmap_wc = true;
+ ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
err = gen8_init_scratch(&ppgtt->vm);
if (err)