Diffstat (limited to 'drivers/gpu/drm/i915/intel_lrc.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_lrc.c | 250
1 file changed, 119 insertions, 131 deletions
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d84c7815ee0c..1effbf49fa08 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -133,7 +133,6 @@
  */
 #include <linux/interrupt.h>
 
-#include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_gem_render_state.h"
@@ -363,31 +362,12 @@ execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
         trace_i915_request_out(rq);
 }
 
-static void
-execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
-{
-        ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
-        ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
-        ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
-        ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
-}
-
 static u64 execlists_update_context(struct i915_request *rq)
 {
-        struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
         struct intel_context *ce = rq->hw_context;
-        u32 *reg_state = ce->lrc_reg_state;
 
-        reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
-
-        /*
-         * True 32b PPGTT with dynamic page allocation: update PDP
-         * registers and point the unallocated PDPs to scratch page.
-         * PML4 is allocated during ppgtt init, so this is not needed
-         * in 48-bit mode.
-         */
-        if (!i915_vm_is_48bit(&ppgtt->vm))
-                execlists_update_context_pdps(ppgtt, reg_state);
+        ce->lrc_reg_state[CTX_RING_TAIL + 1] =
+                intel_ring_set_tail(rq->ring, rq->tail);
 
         /*
          * Make sure the context image is complete before we submit it to HW.
@@ -770,6 +750,13 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
         execlists_clear_all_active(execlists);
 }
 
+static inline void
+invalidate_csb_entries(const u32 *first, const u32 *last)
+{
+        clflush((void *)first);
+        clflush((void *)last);
+}
+
 static void reset_csb_pointers(struct intel_engine_execlists *execlists)
 {
         const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
@@ -785,6 +772,9 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
          */
         execlists->csb_head = reset_value;
         WRITE_ONCE(*execlists->csb_write, reset_value);
+
+        invalidate_csb_entries(&execlists->csb_status[0],
+                               &execlists->csb_status[GEN8_CSB_ENTRIES - 1]);
 }
 
 static void nop_submission_tasklet(unsigned long data)
@@ -1020,6 +1010,19 @@ static void process_csb(struct intel_engine_cs *engine)
         } while (head != tail);
 
         execlists->csb_head = head;
+
+        /*
+         * Gen11 has proven to fail wrt global observation point between
+         * entry and tail update, failing on the ordering and thus
+         * we see an old entry in the context status buffer.
+         *
+         * Forcibly evict out entries for the next gpu csb update,
+         * to increase the odds that we get a fresh entries with non
+         * working hardware. The cost for doing so comes out mostly with
+         * the wash as hardware, working or not, will need to do the
+         * invalidation before.
+         */
+        invalidate_csb_entries(&buf[0], &buf[GEN8_CSB_ENTRIES - 1]);
 }
 
 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
@@ -1247,29 +1250,88 @@ execlists_context_pin(struct intel_engine_cs *engine,
         return __execlists_context_pin(engine, ctx, ce);
 }
 
+static int emit_pdps(struct i915_request *rq)
+{
+        const struct intel_engine_cs * const engine = rq->engine;
+        struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt;
+        int err, i;
+        u32 *cs;
+
+        GEM_BUG_ON(intel_vgpu_active(rq->i915));
+
+        /*
+         * Beware ye of the dragons, this sequence is magic!
+         *
+         * Small changes to this sequence can cause anything from
+         * GPU hangs to forcewake errors and machine lockups!
+         */
+
+        /* Flush any residual operations from the context load */
+        err = engine->emit_flush(rq, EMIT_FLUSH);
+        if (err)
+                return err;
+
+        /* Magic required to prevent forcewake errors! */
+        err = engine->emit_flush(rq, EMIT_INVALIDATE);
+        if (err)
+                return err;
+
+        cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+        if (IS_ERR(cs))
+                return PTR_ERR(cs);
+
+        /* Ensure the LRI have landed before we invalidate & continue */
+        *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+        for (i = GEN8_3LVL_PDPES; i--; ) {
+                const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+                *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+                *cs++ = upper_32_bits(pd_daddr);
+                *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+                *cs++ = lower_32_bits(pd_daddr);
+        }
+        *cs++ = MI_NOOP;
+
+        intel_ring_advance(rq, cs);
+
+        /* Be doubly sure the LRI have landed before proceeding */
+        err = engine->emit_flush(rq, EMIT_FLUSH);
+        if (err)
+                return err;
+
+        /* Re-invalidate the TLB for luck */
+        return engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
 static int execlists_request_alloc(struct i915_request *request)
 {
         int ret;
 
         GEM_BUG_ON(!request->hw_context->pin_count);
 
-        /* Flush enough space to reduce the likelihood of waiting after
+        /*
+         * Flush enough space to reduce the likelihood of waiting after
          * we start building the request - in which case we will just
          * have to repeat work.
          */
         request->reserved_space += EXECLISTS_REQUEST_SIZE;
 
-        ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
-        if (ret)
-                return ret;
-
-        /* Note that after this point, we have committed to using
+        /*
+         * Note that after this point, we have committed to using
          * this request as it is being used to both track the
          * state of engine initialisation and liveness of the
          * golden renderstate above. Think twice before you try
          * to cancel/unwind this request now.
          */
 
+        /* Unconditionally invalidate GPU caches and TLBs. */
+        if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
+                ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+        else
+                ret = emit_pdps(request);
+        if (ret)
+                return ret;
+
         request->reserved_space -= EXECLISTS_REQUEST_SIZE;
         return 0;
 }
@@ -1592,7 +1654,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
 {
         struct drm_i915_private *dev_priv = engine->i915;
 
-        I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+        intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
 
         /*
          * Make sure we're not enabling the new 12-deep CSB
@@ -1633,6 +1695,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
         intel_engine_apply_workarounds(engine);
+        intel_engine_apply_whitelist(engine);
 
         intel_mocs_init_engine(engine);
 
@@ -1649,43 +1712,6 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
         return 0;
 }
 
-static int gen8_init_render_ring(struct intel_engine_cs *engine)
-{
-        struct drm_i915_private *dev_priv = engine->i915;
-        int ret;
-
-        ret = gen8_init_common_ring(engine);
-        if (ret)
-                return ret;
-
-        intel_engine_apply_whitelist(engine);
-
-        /* We need to disable the AsyncFlip performance optimisations in order
-         * to use MI_WAIT_FOR_EVENT within the CS. It should already be
-         * programmed to '1' on all products.
-         *
-         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
-         */
-        I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
-
-        I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
-
-        return 0;
-}
-
-static int gen9_init_render_ring(struct intel_engine_cs *engine)
-{
-        int ret;
-
-        ret = gen8_init_common_ring(engine);
-        if (ret)
-                return ret;
-
-        intel_engine_apply_whitelist(engine);
-
-        return 0;
-}
-
 static struct i915_request *
 execlists_reset_prepare(struct intel_engine_cs *engine)
 {
@@ -1841,56 +1867,11 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
                   atomic_read(&execlists->tasklet.count));
 }
 
-static int intel_logical_ring_emit_pdps(struct i915_request *rq)
-{
-        struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
-        struct intel_engine_cs *engine = rq->engine;
-        const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
-        u32 *cs;
-        int i;
-
-        cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2);
-        if (IS_ERR(cs))
-                return PTR_ERR(cs);
-
-        *cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
-        for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
-                const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
-                *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
-                *cs++ = upper_32_bits(pd_daddr);
-                *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
-                *cs++ = lower_32_bits(pd_daddr);
-        }
-
-        *cs++ = MI_NOOP;
-        intel_ring_advance(rq, cs);
-
-        return 0;
-}
-
 static int gen8_emit_bb_start(struct i915_request *rq,
                               u64 offset, u32 len,
                               const unsigned int flags)
 {
         u32 *cs;
-        int ret;
-
-        /* Don't rely in hw updating PDPs, specially in lite-restore.
-         * Ideally, we should set Force PD Restore in ctx descriptor,
-         * but we can't. Force Restore would be a second option, but
-         * it is unsafe in case of lite-restore (because the ctx is
-         * not idle). PML4 is allocated during ppgtt init so this is
-         * not needed in 48-bit.*/
-        if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
-            !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
-            !intel_vgpu_active(rq->i915)) {
-                ret = intel_logical_ring_emit_pdps(rq);
-                if (ret)
-                        return ret;
-
-                rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
-        }
 
         cs = intel_ring_begin(rq, 6);
         if (IS_ERR(cs))
@@ -1923,6 +1904,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
 
         *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
         *cs++ = MI_NOOP;
+
         intel_ring_advance(rq, cs);
 
         return 0;
@@ -2007,7 +1989,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
                  * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
                  * pipe control.
                  */
-                if (IS_GEN9(request->i915))
+                if (IS_GEN(request->i915, 9))
                         vf_flush_wa = true;
 
                 /* WaForGAMHang:kbl */
@@ -2078,10 +2060,18 @@ static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
         /* We're using qword write, seqno should be aligned to 8 bytes. */
         BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
 
-        cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno,
-                                      intel_hws_seqno_address(request->engine));
+        cs = gen8_emit_ggtt_write_rcs(cs,
+                                      request->global_seqno,
+                                      intel_hws_seqno_address(request->engine),
+                                      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+                                      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                                      PIPE_CONTROL_DC_FLUSH_ENABLE |
+                                      PIPE_CONTROL_FLUSH_ENABLE |
+                                      PIPE_CONTROL_CS_STALL);
+
         *cs++ = MI_USER_INTERRUPT;
         *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
         request->tail = intel_ring_offset(request, cs);
         assert_ring_tail_valid(request->ring, request->tail);
 
@@ -2244,6 +2234,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
         if (ret)
                 return ret;
 
+        intel_engine_init_workarounds(engine);
+
         if (HAS_LOGICAL_RING_ELSQ(i915)) {
                 execlists->submit_reg = i915->regs +
                         i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
@@ -2276,19 +2268,11 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 
 int logical_render_ring_init(struct intel_engine_cs *engine)
 {
-        struct drm_i915_private *dev_priv = engine->i915;
         int ret;
 
         logical_ring_setup(engine);
 
-        if (HAS_L3_DPF(dev_priv))
-                engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
         /* Override some for render ring. */
-        if (INTEL_GEN(dev_priv) >= 9)
-                engine->init_hw = gen9_init_render_ring;
-        else
-                engine->init_hw = gen8_init_render_ring;
         engine->init_context = gen8_init_rcs_context;
         engine->emit_flush = gen8_emit_flush_render;
         engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
@@ -2310,7 +2294,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
         }
 
         intel_engine_init_whitelist(engine);
-        intel_engine_init_workarounds(engine);
 
         return 0;
 }
@@ -2325,9 +2308,9 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine)
 static u32
 make_rpcs(struct drm_i915_private *dev_priv)
 {
-        bool subslice_pg = INTEL_INFO(dev_priv)->sseu.has_subslice_pg;
-        u8 slices = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask);
-        u8 subslices = hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]);
+        bool subslice_pg = RUNTIME_INFO(dev_priv)->sseu.has_subslice_pg;
+        u8 slices = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
+        u8 subslices = hweight8(RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0]);
         u32 rpcs = 0;
 
         /*
@@ -2362,7 +2345,7 @@ make_rpcs(struct drm_i915_private *dev_priv)
          * subslices are enabled, or a count between one and four on the first
          * slice.
          */
-        if (IS_GEN11(dev_priv) && slices == 1 && subslices >= 4) {
+        if (IS_GEN(dev_priv, 11) && slices == 1 && subslices >= 4) {
                 GEM_BUG_ON(subslices & 1);
 
                 subslice_pg = false;
@@ -2375,7 +2358,7 @@ make_rpcs(struct drm_i915_private *dev_priv)
          * must make an explicit request through RPCS for full
          * enablement.
          */
-        if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
+        if (RUNTIME_INFO(dev_priv)->sseu.has_slice_pg) {
                 u32 mask, val = slices;
 
                 if (INTEL_GEN(dev_priv) >= 11) {
@@ -2403,17 +2386,17 @@ make_rpcs(struct drm_i915_private *dev_priv)
                 rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
         }
 
-        if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
+        if (RUNTIME_INFO(dev_priv)->sseu.has_eu_pg) {
                 u32 val;
 
-                val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+                val = RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice <<
                         GEN8_RPCS_EU_MIN_SHIFT;
                 GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
                 val &= GEN8_RPCS_EU_MIN_MASK;
                 rpcs |= val;
 
-                val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+                val = RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice <<
                         GEN8_RPCS_EU_MAX_SHIFT;
                 GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
                 val &= GEN8_RPCS_EU_MAX_MASK;
@@ -2538,6 +2521,11 @@ static void execlists_init_reg_state(u32 *regs,
                  * other PDP Descriptors are ignored.
                  */
                 ASSIGN_CTX_PML4(ctx->ppgtt, regs);
+        } else {
+                ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
+                ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
+                ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
+                ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
         }
 
         if (rcs) {