From b84cf5363c2a529d154edca1f09402ac8df9995c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 21 Nov 2016 11:31:09 +0000 Subject: drm/i915: Use user, not driver, DRM_DEBUG for 2 context ioctls For user actions, such as the context ioctls, we prefer to use DRM_DEBUG rather than DRM_DEBUG_DRIVER as currently used. Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20161121113109.1976-1-chris@chris-wilson.co.uk Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_gem_context.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 1f94b8d6d83d..d95dfec7166e 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -1027,7 +1027,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, return PTR_ERR(ctx); args->ctx_id = ctx->user_handle; - DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); + DRM_DEBUG("HW context %d created\n", args->ctx_id); return 0; } @@ -1060,7 +1060,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, context_close(ctx); mutex_unlock(&dev->struct_mutex); - DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id); + DRM_DEBUG("HW context %d destroyed\n", args->ctx_id); return 0; } -- cgit v1.2.3 From 381744f8062239c3e08b9c79f8f7941c521d46c2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 21 Nov 2016 11:07:59 +0000 Subject: drm/i915: Add a warning on shutdown if signal threads still active When unloading the module, it is expected that we have finished executing all requests and so the signal threads should be idle. Add a warning in case there are any residual requests in the signaler rbtrees at that point. v2: We can also warn if there are any waiters Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161121110759.22896-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_breadcrumbs.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index c9c46a538edb..53ae7884babd 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -623,6 +623,12 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) { struct intel_breadcrumbs *b = &engine->breadcrumbs; + /* The engines should be idle and all requests accounted for! */ + WARN_ON(READ_ONCE(b->first_wait)); + WARN_ON(!RB_EMPTY_ROOT(&b->waiters)); + WARN_ON(READ_ONCE(b->first_signal)); + WARN_ON(!RB_EMPTY_ROOT(&b->signals)); + if (!IS_ERR_OR_NULL(b->signaler)) kthread_stop(b->signaler); -- cgit v1.2.3 From 58415918d4432d22f4ff9d3b2e2e8959cfb716cb Mon Sep 17 00:00:00 2001 From: "A.Sunil Kamath" Date: Sun, 20 Nov 2016 23:20:26 +0530 Subject: drm/i915: Use num_scalers instead of SKL_NUM_SCALERS in debugfs Better to use num_scaler itself while printing scaler_info. This fixes a bug of printing information for the missing second scaler on pipe C for SKL platform. 
Signed-off-by: A.Sunil Kamath Reviewed-by: Ander Conselvan de Oliveira Signed-off-by: Ander Conselvan de Oliveira Link: http://patchwork.freedesktop.org/patch/msgid/1479664226-22307-1-git-send-email-sunil.kamath@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 96407f684f7f..fb47efdfb448 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3080,7 +3080,7 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc) pipe_config->scaler_state.scaler_users, pipe_config->scaler_state.scaler_id); - for (i = 0; i < SKL_NUM_SCALERS; i++) { + for (i = 0; i < num_scalers; i++) { struct intel_scaler *sc = &pipe_config->scaler_state.scalers[i]; -- cgit v1.2.3 From b2251c082044969df965a42bd08754d2fedd333a Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Wed, 16 Nov 2016 11:33:26 +0200 Subject: drm/i915: Add more keywords to firmware loading message To find out what firmware we actually loaded (from dmesg) the explicit 'dmc' and 'firmware' are missing from the info printout. Add them. Cc: Imre Deak Reviewed-by: Imre Deak Signed-off-by: Mika Kuoppala Link: http://patchwork.freedesktop.org/patch/msgid/1479288806-17355-1-git-send-email-mika.kuoppala@intel.com --- drivers/gpu/drm/i915/intel_csr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index d7a04bca8c28..10e9abcf0932 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -405,7 +405,7 @@ static void csr_load_work_fn(struct work_struct *work) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - DRM_INFO("Finished loading %s (v%u.%u)\n", + DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n", dev_priv->csr.fw_path, CSR_VERSION_MAJOR(csr->version), CSR_VERSION_MINOR(csr->version)); -- cgit v1.2.3 From 6e16d028e441b0b2c141aaecb39f4838cd2964b5 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Wed, 16 Nov 2016 17:20:29 +0200 Subject: drm/i915: Split up hangcheck phases In order to simplify hangcheck state keeping, split hangcheck per engine loop in three phases: state load, action, state save. Add few more hangcheck actions to separate between seqno, head and subunit movements. This helps to gather all the hangcheck actions under a single switch umbrella. 
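In outline, and simplified from the diff below, the per-engine hangcheck loop after this patch becomes a fixed three-phase sequence:

   for_each_engine(engine, dev_priv, id) {
           struct intel_engine_hangcheck cur_state, *hc = &cur_state;

           hangcheck_load_sample(engine, hc);        /* state load */
           hangcheck_accumulate_sample(engine, hc);  /* action (single switch) */
           hangcheck_store_sample(engine, hc);       /* state save */

           if (hc->score >= HANGCHECK_SCORE_RING_HUNG)
                   hung |= intel_engine_flag(engine);
   }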
Cc: Chris Wilson Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Mika Kuoppala --- drivers/gpu/drm/i915/i915_gpu_error.c | 8 +- drivers/gpu/drm/i915/intel_hangcheck.c | 241 ++++++++++++++++++-------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 +- 3 files changed, 146 insertions(+), 107 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index ae84aa4b1467..4bcf1a0f5675 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -323,8 +323,12 @@ static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a) return "idle"; case HANGCHECK_WAIT: return "wait"; - case HANGCHECK_ACTIVE: - return "active"; + case HANGCHECK_ACTIVE_SEQNO: + return "active seqno"; + case HANGCHECK_ACTIVE_HEAD: + return "active head"; + case HANGCHECK_ACTIVE_SUBUNITS: + return "active subunits"; case HANGCHECK_KICK: return "kick"; case HANGCHECK_HUNG: diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index 53df5b11bff4..3d2e81c81252 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -236,11 +236,11 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd) memset(&engine->hangcheck.instdone, 0, sizeof(engine->hangcheck.instdone)); - return HANGCHECK_ACTIVE; + return HANGCHECK_ACTIVE_HEAD; } if (!subunits_stuck(engine)) - return HANGCHECK_ACTIVE; + return HANGCHECK_ACTIVE_SUBUNITS; return HANGCHECK_HUNG; } @@ -291,6 +291,129 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) return HANGCHECK_HUNG; } +static void hangcheck_load_sample(struct intel_engine_cs *engine, + struct intel_engine_hangcheck *hc) +{ + /* We don't strictly need an irq-barrier here, as we are not + * serving an interrupt request, be paranoid in case the + * barrier has side-effects (such as preventing a broken + * cacheline snoop) and so be sure that we can see the seqno + * advance. If the seqno should stick, due to a stale + * cacheline, we would erroneously declare the GPU hung. + */ + if (engine->irq_seqno_barrier) + engine->irq_seqno_barrier(engine); + + hc->acthd = intel_engine_get_active_head(engine); + hc->seqno = intel_engine_get_seqno(engine); + hc->score = engine->hangcheck.score; +} + +static void hangcheck_store_sample(struct intel_engine_cs *engine, + const struct intel_engine_hangcheck *hc) +{ + engine->hangcheck.acthd = hc->acthd; + engine->hangcheck.seqno = hc->seqno; + engine->hangcheck.score = hc->score; + engine->hangcheck.action = hc->action; +} + +static enum intel_engine_hangcheck_action +hangcheck_get_action(struct intel_engine_cs *engine, + const struct intel_engine_hangcheck *hc) +{ + if (engine->hangcheck.seqno != hc->seqno) + return HANGCHECK_ACTIVE_SEQNO; + + if (i915_seqno_passed(hc->seqno, intel_engine_last_submit(engine))) + return HANGCHECK_IDLE; + + return engine_stuck(engine, hc->acthd); +} + +static void hangcheck_accumulate_sample(struct intel_engine_cs *engine, + struct intel_engine_hangcheck *hc) +{ + hc->action = hangcheck_get_action(engine, hc); + + switch (hc->action) { + case HANGCHECK_IDLE: + case HANGCHECK_WAIT: + break; + + case HANGCHECK_ACTIVE_HEAD: + case HANGCHECK_ACTIVE_SUBUNITS: + /* We always increment the hangcheck score + * if the engine is busy and still processing + * the same request, so that no single request + * can run indefinitely (such as a chain of + * batches). 
The only time we do not increment + * the hangcheck score on this ring, if this + * engine is in a legitimate wait for another + * engine. In that case the waiting engine is a + * victim and we want to be sure we catch the + * right culprit. Then every time we do kick + * the ring, add a small increment to the + * score so that we can catch a batch that is + * being repeatedly kicked and so responsible + * for stalling the machine. + */ + hc->score += 1; + break; + + case HANGCHECK_KICK: + hc->score += 5; + break; + + case HANGCHECK_HUNG: + hc->score += 20; + break; + + case HANGCHECK_ACTIVE_SEQNO: + /* Gradually reduce the count so that we catch DoS + * attempts across multiple batches. + */ + if (hc->score > 0) + hc->score -= 15; + if (hc->score < 0) + hc->score = 0; + + /* Clear head and subunit states on seqno movement */ + hc->acthd = 0; + + memset(&engine->hangcheck.instdone, 0, + sizeof(engine->hangcheck.instdone)); + break; + + default: + MISSING_CASE(hc->action); + } +} + +static void hangcheck_declare_hang(struct drm_i915_private *i915, + unsigned int hung, + unsigned int stuck) +{ + struct intel_engine_cs *engine; + char msg[80]; + unsigned int tmp; + int len; + + /* If some rings hung but others were still busy, only + * blame the hanging rings in the synopsis. + */ + if (stuck != hung) + hung &= ~stuck; + len = scnprintf(msg, sizeof(msg), + "%s on ", stuck == hung ? "No progress" : "Hang"); + for_each_engine_masked(engine, i915, hung, tmp) + len += scnprintf(msg + len, sizeof(msg) - len, + "%s, ", engine->name); + msg[len-2] = '\0'; + + return i915_handle_error(i915, hung, msg); +} + /* * This is called when the chip hasn't reported back with completed * batchbuffers in a long time. We keep track per ring seqno progress and @@ -308,10 +431,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work) enum intel_engine_id id; unsigned int hung = 0, stuck = 0; int busy_count = 0; -#define BUSY 1 -#define KICK 5 -#define HUNG 20 -#define ACTIVE_DECAY 15 if (!i915.enable_hangcheck) return; @@ -326,112 +445,26 @@ static void i915_hangcheck_elapsed(struct work_struct *work) intel_uncore_arm_unclaimed_mmio_detection(dev_priv); for_each_engine(engine, dev_priv, id) { - bool busy = intel_engine_has_waiter(engine); - u64 acthd; - u32 seqno; - u32 submit; + struct intel_engine_hangcheck cur_state, *hc = &cur_state; + const bool busy = intel_engine_has_waiter(engine); semaphore_clear_deadlocks(dev_priv); - /* We don't strictly need an irq-barrier here, as we are not - * serving an interrupt request, be paranoid in case the - * barrier has side-effects (such as preventing a broken - * cacheline snoop) and so be sure that we can see the seqno - * advance. If the seqno should stick, due to a stale - * cacheline, we would erroneously declare the GPU hung. - */ - if (engine->irq_seqno_barrier) - engine->irq_seqno_barrier(engine); - - acthd = intel_engine_get_active_head(engine); - seqno = intel_engine_get_seqno(engine); - submit = intel_engine_last_submit(engine); - - if (engine->hangcheck.seqno == seqno) { - if (i915_seqno_passed(seqno, submit)) { - engine->hangcheck.action = HANGCHECK_IDLE; - } else { - /* We always increment the hangcheck score - * if the engine is busy and still processing - * the same request, so that no single request - * can run indefinitely (such as a chain of - * batches). The only time we do not increment - * the hangcheck score on this ring, if this - * engine is in a legitimate wait for another - * engine. 
In that case the waiting engine is a - * victim and we want to be sure we catch the - * right culprit. Then every time we do kick - * the ring, add a small increment to the - * score so that we can catch a batch that is - * being repeatedly kicked and so responsible - * for stalling the machine. - */ - engine->hangcheck.action = - engine_stuck(engine, acthd); - - switch (engine->hangcheck.action) { - case HANGCHECK_IDLE: - case HANGCHECK_WAIT: - break; - case HANGCHECK_ACTIVE: - engine->hangcheck.score += BUSY; - break; - case HANGCHECK_KICK: - engine->hangcheck.score += KICK; - break; - case HANGCHECK_HUNG: - engine->hangcheck.score += HUNG; - break; - } - } - - if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { - hung |= intel_engine_flag(engine); - if (engine->hangcheck.action != HANGCHECK_HUNG) - stuck |= intel_engine_flag(engine); - } - } else { - engine->hangcheck.action = HANGCHECK_ACTIVE; - - /* Gradually reduce the count so that we catch DoS - * attempts across multiple batches. - */ - if (engine->hangcheck.score > 0) - engine->hangcheck.score -= ACTIVE_DECAY; - if (engine->hangcheck.score < 0) - engine->hangcheck.score = 0; - - /* Clear head and subunit states on seqno movement */ - acthd = 0; - - memset(&engine->hangcheck.instdone, 0, - sizeof(engine->hangcheck.instdone)); + hangcheck_load_sample(engine, hc); + hangcheck_accumulate_sample(engine, hc); + hangcheck_store_sample(engine, hc); + + if (hc->score >= HANGCHECK_SCORE_RING_HUNG) { + hung |= intel_engine_flag(engine); + if (hc->action != HANGCHECK_HUNG) + stuck |= intel_engine_flag(engine); } - engine->hangcheck.seqno = seqno; - engine->hangcheck.acthd = acthd; busy_count += busy; } - if (hung) { - char msg[80]; - unsigned int tmp; - int len; - - /* If some rings hung but others were still busy, only - * blame the hanging rings in the synopsis. - */ - if (stuck != hung) - hung &= ~stuck; - len = scnprintf(msg, sizeof(msg), - "%s on ", stuck == hung ? "No progress" : "Hang"); - for_each_engine_masked(engine, dev_priv, hung, tmp) - len += scnprintf(msg + len, sizeof(msg) - len, - "%s, ", engine->name); - msg[len-2] = '\0'; - - return i915_handle_error(dev_priv, hung, msg); - } + if (hung) + hangcheck_declare_hang(dev_priv, hung, stuck); /* Reset timer in case GPU hangs without another request being added */ if (busy_count) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 3466b4e77e7c..3152b2b4a202 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -67,7 +67,9 @@ struct intel_hw_status_page { enum intel_engine_hangcheck_action { HANGCHECK_IDLE = 0, HANGCHECK_WAIT, - HANGCHECK_ACTIVE, + HANGCHECK_ACTIVE_SEQNO, + HANGCHECK_ACTIVE_HEAD, + HANGCHECK_ACTIVE_SUBUNITS, HANGCHECK_KICK, HANGCHECK_HUNG, }; -- cgit v1.2.3 From 3fe3b030bd2d7a51c12aa6fe0e5178b9f1a726ec Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Fri, 18 Nov 2016 15:09:04 +0200 Subject: drm/i915: Decouple hang detection from hangcheck period Hangcheck state accumulation has gained more steps along the years, like head movement and more recently the subunit inactivity check. As the subunit sampling is only done if the previous state check showed inactivity, we have added more stages (and time) to reach a hang verdict. 
Asymmetric engine states led to different actual weight of 'one hangcheck unit' and it was demonstrated in some hangs that due to difference in stages, simpler engines were accused falsely of a hang as their scoring was much more quicker to accumulate above the hang treshold. To completely decouple the hangcheck guilty score from the hangcheck period, convert hangcheck score to a rough period of inactivity measurement. As these are tracked as jiffies, they are meaningful also across reset boundaries. This makes finding a guilty engine more accurate across multi engine activity scenarios, especially across asymmetric engines. We lose the ability to detect cross batch malicious attempts to hinder the progress. Plan is to move this functionality to be part of context banning which is more natural fit, later in the series. v2: use time_before macros (Chris) reinstate the pardoning of moving engine after hc (Chris) v3: avoid global state for per engine stall detection (Chris) v4: take timeline last retirement into account (Chris) v5: do debug print on pardoning, split out retirement timestamp (Chris) Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Mika Kuoppala --- drivers/gpu/drm/i915/i915_debugfs.c | 17 +++-- drivers/gpu/drm/i915/i915_drv.h | 6 +- drivers/gpu/drm/i915/i915_gem.c | 8 ++- drivers/gpu/drm/i915/i915_gpu_error.c | 46 ++++---------- drivers/gpu/drm/i915/intel_hangcheck.c | 108 +++++++++++++++----------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 40 +++++++++--- 6 files changed, 117 insertions(+), 108 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index fb47efdfb448..437212a95b19 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1351,10 +1351,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) seq_printf(m, "\tseqno = %x [current %x, last %x]\n", engine->hangcheck.seqno, seqno[id], intel_engine_last_submit(engine)); - seq_printf(m, "\twaiters? %s, fake irq active? %s\n", + seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? 
%s\n", yesno(intel_engine_has_waiter(engine)), yesno(test_bit(engine->id, - &dev_priv->gpu_error.missed_irq_rings))); + &dev_priv->gpu_error.missed_irq_rings)), + yesno(engine->hangcheck.stalled)); + spin_lock_irq(&b->lock); for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { struct intel_wait *w = container_of(rb, typeof(*w), node); @@ -1367,8 +1369,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", (long long)engine->hangcheck.acthd, (long long)acthd[id]); - seq_printf(m, "\tscore = %d\n", engine->hangcheck.score); - seq_printf(m, "\taction = %d\n", engine->hangcheck.action); + seq_printf(m, "\taction = %s(%d) %d ms ago\n", + hangcheck_action_to_str(engine->hangcheck.action), + engine->hangcheck.action, + jiffies_to_msecs(jiffies - + engine->hangcheck.action_timestamp)); if (engine->id == RCS) { seq_puts(m, "\tinstdone read =\n"); @@ -3162,11 +3167,11 @@ static int i915_engine_info(struct seq_file *m, void *unused) u64 addr; seq_printf(m, "%s\n", engine->name); - seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n", + seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n", intel_engine_get_seqno(engine), intel_engine_last_submit(engine), engine->hangcheck.seqno, - engine->hangcheck.score); + jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp)); rcu_read_lock(); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 56002a52936d..0ebec2b77ae1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -800,7 +800,8 @@ struct drm_i915_error_state { /* Software tracked state */ bool waiting; int num_waiters; - int hangcheck_score; + unsigned long hangcheck_timestamp; + bool hangcheck_stalled; enum intel_engine_hangcheck_action hangcheck_action; struct i915_address_space *vm; int num_requests; @@ -1446,6 +1447,9 @@ struct i915_error_state_file_priv { #define I915_RESET_TIMEOUT (10 * HZ) /* 10s */ #define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */ +#define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */ +#define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */ + struct i915_gpu_error { /* For hangcheck timer */ #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 902fa427c196..1f8dfd4aba61 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2705,9 +2705,13 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) if (!request) return; - ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; - if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) + ring_hung = engine->hangcheck.stalled; + if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) { + DRM_DEBUG_DRIVER("%s pardoned, was guilty? 
%s\n", + engine->name, + yesno(ring_hung)); ring_hung = false; + } i915_set_reset_status(request->ctx, ring_hung); if (!ring_hung) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 4bcf1a0f5675..d5a4ec9b507f 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -316,28 +316,6 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, } } -static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a) -{ - switch (a) { - case HANGCHECK_IDLE: - return "idle"; - case HANGCHECK_WAIT: - return "wait"; - case HANGCHECK_ACTIVE_SEQNO: - return "active seqno"; - case HANGCHECK_ACTIVE_HEAD: - return "active head"; - case HANGCHECK_ACTIVE_SUBUNITS: - return "active subunits"; - case HANGCHECK_KICK: - return "kick"; - case HANGCHECK_HUNG: - return "hung"; - } - - return "unknown"; -} - static void error_print_instdone(struct drm_i915_error_state_buf *m, struct drm_i915_error_engine *ee) { @@ -445,9 +423,13 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, err_printf(m, " waiting: %s\n", yesno(ee->waiting)); err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head); err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail); - err_printf(m, " hangcheck: %s [%d]\n", - hangcheck_action_to_str(ee->hangcheck_action), - ee->hangcheck_score); + err_printf(m, " hangcheck stall: %s\n", yesno(ee->hangcheck_stalled)); + err_printf(m, " hangcheck action: %s\n", + hangcheck_action_to_str(ee->hangcheck_action)); + err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n", + ee->hangcheck_timestamp, + jiffies_to_msecs(jiffies - ee->hangcheck_timestamp)); + error_print_request(m, " ELSP[0]: ", &ee->execlist[0]); error_print_request(m, " ELSP[1]: ", &ee->execlist[1]); } @@ -536,7 +518,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, struct pci_dev *pdev = dev_priv->drm.pdev; struct drm_i915_error_state *error = error_priv->error; struct drm_i915_error_object *obj; - int max_hangcheck_score; int i, j; if (!error) { @@ -553,13 +534,9 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "Uptime: %ld s %ld us\n", error->uptime.tv_sec, error->uptime.tv_usec); err_print_capabilities(m, &error->device_info); - max_hangcheck_score = 0; - for (i = 0; i < ARRAY_SIZE(error->engine); i++) { - if (error->engine[i].hangcheck_score > max_hangcheck_score) - max_hangcheck_score = error->engine[i].hangcheck_score; - } + for (i = 0; i < ARRAY_SIZE(error->engine); i++) { - if (error->engine[i].hangcheck_score == max_hangcheck_score && + if (error->engine[i].hangcheck_stalled && error->engine[i].pid != -1) { err_printf(m, "Active process (on ring %s): %s [%d]\n", engine_str(i), @@ -945,7 +922,7 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, * strictly a client bug. Use instdone to differentiate those some. 
*/ for (i = 0; i < I915_NUM_ENGINES; i++) { - if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) { + if (error->engine[i].hangcheck_stalled) { if (engine_id) *engine_id = i; @@ -1163,8 +1140,9 @@ static void error_record_engine_registers(struct drm_i915_error_state *error, ee->hws = I915_READ(mmio); } - ee->hangcheck_score = engine->hangcheck.score; + ee->hangcheck_timestamp = engine->hangcheck.action_timestamp; ee->hangcheck_action = engine->hangcheck.action; + ee->hangcheck_stalled = engine->hangcheck.stalled; if (USES_PPGTT(dev_priv)) { int i; diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index 3d2e81c81252..c03db022a6d8 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -236,13 +236,13 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd) memset(&engine->hangcheck.instdone, 0, sizeof(engine->hangcheck.instdone)); - return HANGCHECK_ACTIVE_HEAD; + return ENGINE_ACTIVE_HEAD; } if (!subunits_stuck(engine)) - return HANGCHECK_ACTIVE_SUBUNITS; + return ENGINE_ACTIVE_SUBUNITS; - return HANGCHECK_HUNG; + return ENGINE_DEAD; } static enum intel_engine_hangcheck_action @@ -253,11 +253,11 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) u32 tmp; ha = head_stuck(engine, acthd); - if (ha != HANGCHECK_HUNG) + if (ha != ENGINE_DEAD) return ha; if (IS_GEN2(dev_priv)) - return HANGCHECK_HUNG; + return ENGINE_DEAD; /* Is the chip hanging on a WAIT_FOR_EVENT? * If so we can simply poke the RB_WAIT bit @@ -270,25 +270,25 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) "Kicking stuck wait on %s", engine->name); I915_WRITE_CTL(engine, tmp); - return HANGCHECK_KICK; + return ENGINE_WAIT_KICK; } if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) { switch (semaphore_passed(engine)) { default: - return HANGCHECK_HUNG; + return ENGINE_DEAD; case 1: i915_handle_error(dev_priv, 0, "Kicking stuck semaphore on %s", engine->name); I915_WRITE_CTL(engine, tmp); - return HANGCHECK_KICK; + return ENGINE_WAIT_KICK; case 0: - return HANGCHECK_WAIT; + return ENGINE_WAIT; } } - return HANGCHECK_HUNG; + return ENGINE_DEAD; } static void hangcheck_load_sample(struct intel_engine_cs *engine, @@ -306,7 +306,6 @@ static void hangcheck_load_sample(struct intel_engine_cs *engine, hc->acthd = intel_engine_get_active_head(engine); hc->seqno = intel_engine_get_seqno(engine); - hc->score = engine->hangcheck.score; } static void hangcheck_store_sample(struct intel_engine_cs *engine, @@ -314,8 +313,8 @@ static void hangcheck_store_sample(struct intel_engine_cs *engine, { engine->hangcheck.acthd = hc->acthd; engine->hangcheck.seqno = hc->seqno; - engine->hangcheck.score = hc->score; engine->hangcheck.action = hc->action; + engine->hangcheck.stalled = hc->stalled; } static enum intel_engine_hangcheck_action @@ -323,10 +322,10 @@ hangcheck_get_action(struct intel_engine_cs *engine, const struct intel_engine_hangcheck *hc) { if (engine->hangcheck.seqno != hc->seqno) - return HANGCHECK_ACTIVE_SEQNO; + return ENGINE_ACTIVE_SEQNO; if (i915_seqno_passed(hc->seqno, intel_engine_last_submit(engine))) - return HANGCHECK_IDLE; + return ENGINE_IDLE; return engine_stuck(engine, hc->acthd); } @@ -334,60 +333,57 @@ hangcheck_get_action(struct intel_engine_cs *engine, static void hangcheck_accumulate_sample(struct intel_engine_cs *engine, struct intel_engine_hangcheck *hc) { + unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT; + hc->action = hangcheck_get_action(engine, hc); - switch (hc->action) { - case HANGCHECK_IDLE: - 
case HANGCHECK_WAIT: - break; + /* We always increment the progress + * if the engine is busy and still processing + * the same request, so that no single request + * can run indefinitely (such as a chain of + * batches). The only time we do not increment + * the hangcheck score on this ring, if this + * engine is in a legitimate wait for another + * engine. In that case the waiting engine is a + * victim and we want to be sure we catch the + * right culprit. Then every time we do kick + * the ring, make it as a progress as the seqno + * advancement might ensure and if not, it + * will catch the hanging engine. + */ - case HANGCHECK_ACTIVE_HEAD: - case HANGCHECK_ACTIVE_SUBUNITS: - /* We always increment the hangcheck score - * if the engine is busy and still processing - * the same request, so that no single request - * can run indefinitely (such as a chain of - * batches). The only time we do not increment - * the hangcheck score on this ring, if this - * engine is in a legitimate wait for another - * engine. In that case the waiting engine is a - * victim and we want to be sure we catch the - * right culprit. Then every time we do kick - * the ring, add a small increment to the - * score so that we can catch a batch that is - * being repeatedly kicked and so responsible - * for stalling the machine. - */ - hc->score += 1; - break; + switch (hc->action) { + case ENGINE_IDLE: + case ENGINE_ACTIVE_SEQNO: + /* Clear head and subunit states on seqno movement */ + hc->acthd = 0; - case HANGCHECK_KICK: - hc->score += 5; - break; + memset(&engine->hangcheck.instdone, 0, + sizeof(engine->hangcheck.instdone)); - case HANGCHECK_HUNG: - hc->score += 20; + /* Intentional fall through */ + case ENGINE_WAIT_KICK: + case ENGINE_WAIT: + engine->hangcheck.action_timestamp = jiffies; break; - case HANGCHECK_ACTIVE_SEQNO: - /* Gradually reduce the count so that we catch DoS - * attempts across multiple batches. + case ENGINE_ACTIVE_HEAD: + case ENGINE_ACTIVE_SUBUNITS: + /* Seqno stuck with still active engine gets leeway, + * in hopes that it is just a long shader. 
*/ - if (hc->score > 0) - hc->score -= 15; - if (hc->score < 0) - hc->score = 0; - - /* Clear head and subunit states on seqno movement */ - hc->acthd = 0; + timeout = I915_SEQNO_DEAD_TIMEOUT; + break; - memset(&engine->hangcheck.instdone, 0, - sizeof(engine->hangcheck.instdone)); + case ENGINE_DEAD: break; default: MISSING_CASE(hc->action); } + + hc->stalled = time_after(jiffies, + engine->hangcheck.action_timestamp + timeout); } static void hangcheck_declare_hang(struct drm_i915_private *i915, @@ -454,9 +450,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work) hangcheck_accumulate_sample(engine, hc); hangcheck_store_sample(engine, hc); - if (hc->score >= HANGCHECK_SCORE_RING_HUNG) { + if (engine->hangcheck.stalled) { hung |= intel_engine_flag(engine); - if (hc->action != HANGCHECK_HUNG) + if (hc->action != ENGINE_DEAD) stuck |= intel_engine_flag(engine); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 3152b2b4a202..3f43adefd1c0 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -65,16 +65,37 @@ struct intel_hw_status_page { GEN8_SEMAPHORE_OFFSET(from, (__ring)->id)) enum intel_engine_hangcheck_action { - HANGCHECK_IDLE = 0, - HANGCHECK_WAIT, - HANGCHECK_ACTIVE_SEQNO, - HANGCHECK_ACTIVE_HEAD, - HANGCHECK_ACTIVE_SUBUNITS, - HANGCHECK_KICK, - HANGCHECK_HUNG, + ENGINE_IDLE = 0, + ENGINE_WAIT, + ENGINE_ACTIVE_SEQNO, + ENGINE_ACTIVE_HEAD, + ENGINE_ACTIVE_SUBUNITS, + ENGINE_WAIT_KICK, + ENGINE_DEAD, }; -#define HANGCHECK_SCORE_RING_HUNG 31 +static inline const char * +hangcheck_action_to_str(const enum intel_engine_hangcheck_action a) +{ + switch (a) { + case ENGINE_IDLE: + return "idle"; + case ENGINE_WAIT: + return "wait"; + case ENGINE_ACTIVE_SEQNO: + return "active seqno"; + case ENGINE_ACTIVE_HEAD: + return "active head"; + case ENGINE_ACTIVE_SUBUNITS: + return "active subunits"; + case ENGINE_WAIT_KICK: + return "wait kick"; + case ENGINE_DEAD: + return "dead"; + } + + return "unknown"; +} #define I915_MAX_SLICES 3 #define I915_MAX_SUBSLICES 3 @@ -106,10 +127,11 @@ struct intel_instdone { struct intel_engine_hangcheck { u64 acthd; u32 seqno; - int score; enum intel_engine_hangcheck_action action; + unsigned long action_timestamp; int deadlock; struct intel_instdone instdone; + bool stalled; }; struct intel_ring { -- cgit v1.2.3 From e5e1fc47eac842daeccc2dd125437f43e0271032 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Wed, 16 Nov 2016 17:20:31 +0200 Subject: drm/i915: Use request retirement as context progress As hangcheck score was removed, the active decay of score was removed also. This removed feature for hangcheck to detect if the gpu client was accidentally or maliciously causing intermittent hangs. Reinstate the scoring as a per context property, so that if one context starts to act unfavourably, ban it. v2: ban_period_secs as a gate to score check (Chris) v3: decay in proper spot. 
scores as tunables (Chris) Cc: Chris Wilson Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Mika Kuoppala --- drivers/gpu/drm/i915/i915_drv.h | 5 ++++ drivers/gpu/drm/i915/i915_gem.c | 44 ++++++++++++++++++++++----------- drivers/gpu/drm/i915/i915_gem_request.c | 4 +++ 3 files changed, 39 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0ebec2b77ae1..691f0b694e77 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -919,6 +919,11 @@ struct i915_ctx_hang_stats { /* This context is banned to submit more work */ bool banned; + +#define CONTEXT_SCORE_GUILTY 10 +#define CONTEXT_SCORE_BAN_THRESHOLD 40 + /* Accumulated score of hangs caused by this context */ + int ban_score; }; /* This must match up with the value previously used for execbuf2.rsvd1. */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1f8dfd4aba61..4c4aed2d2afb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2622,33 +2622,45 @@ err_unlock: static bool i915_context_is_banned(const struct i915_gem_context *ctx) { + const struct i915_ctx_hang_stats *hs = &ctx->hang_stats; unsigned long elapsed; - if (ctx->hang_stats.banned) + if (hs->banned) return true; - elapsed = get_seconds() - ctx->hang_stats.guilty_ts; - if (ctx->hang_stats.ban_period_seconds && - elapsed <= ctx->hang_stats.ban_period_seconds) { + if (!hs->ban_period_seconds) + return false; + + elapsed = get_seconds() - hs->guilty_ts; + if (elapsed <= hs->ban_period_seconds) { DRM_DEBUG("context hanging too fast, banning!\n"); return true; } + if (hs->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD) { + DRM_DEBUG("context hanging too often, banning!\n"); + return true; + } + return false; } -static void i915_set_reset_status(struct i915_gem_context *ctx, - const bool guilty) +static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) { struct i915_ctx_hang_stats *hs = &ctx->hang_stats; - if (guilty) { - hs->banned = i915_context_is_banned(ctx); - hs->batch_active++; - hs->guilty_ts = get_seconds(); - } else { - hs->batch_pending++; - } + hs->ban_score += CONTEXT_SCORE_GUILTY; + + hs->banned = i915_context_is_banned(ctx); + hs->batch_active++; + hs->guilty_ts = get_seconds(); +} + +static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) +{ + struct i915_ctx_hang_stats *hs = &ctx->hang_stats; + + hs->batch_pending++; } struct drm_i915_gem_request * @@ -2713,7 +2725,11 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) ring_hung = false; } - i915_set_reset_status(request->ctx, ring_hung); + if (ring_hung) + i915_gem_context_mark_guilty(request->ctx); + else + i915_gem_context_mark_innocent(request->ctx); + if (!ring_hung) return; diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 27e8f257fb39..60e63956ea19 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -263,6 +263,10 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) request->engine); } + /* Retirement decays the ban score as it is a sign of ctx progress */ + if (request->ctx->hang_stats.ban_score > 0) + request->ctx->hang_stats.ban_score--; + i915_gem_context_put(request->ctx); dma_fence_signal(&request->fence); -- cgit v1.2.3 From 841021713a4d700059774e0d3c77ec6c538daae2 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Wed, 16 Nov 2016 
17:20:32 +0200 Subject: drm/i915: Add bannable context parameter Now when driver has per context scoring of 'hanging badness' and also subsequent hangs during short windows are allowed, if there is progress made in between, it does not make sense to expose a ban timing window as a context parameter anymore. Let the scoring be the sole indicator for ban policy and substitute ban period context parameter as a boolean to get/set context bannable property. v2: allow non root to opt into being banned (Chris) Cc: Chris Wilson Suggested-by: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Mika Kuoppala --- drivers/gpu/drm/i915/i915_drv.h | 13 +++---------- drivers/gpu/drm/i915/i915_gem.c | 10 +--------- drivers/gpu/drm/i915/i915_gem_context.c | 23 ++++++++++++++--------- drivers/gpu/drm/i915/i915_gpu_error.c | 5 +++-- include/uapi/drm/i915_drm.h | 1 + 5 files changed, 22 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 691f0b694e77..7db7b37937e7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -850,6 +850,7 @@ struct drm_i915_error_state { long jiffies; pid_t pid; u32 context; + int ban_score; u32 seqno; u32 head; u32 tail; @@ -909,16 +910,10 @@ struct i915_ctx_hang_stats { /* This context had batch active when hang was declared */ unsigned batch_active; - /* Time when this context was last blamed for a GPU reset */ - unsigned long guilty_ts; - - /* If the contexts causes a second GPU hang within this time, - * it is permanently banned from submitting any more work. - */ - unsigned long ban_period_seconds; + bool bannable:1; /* This context is banned to submit more work */ - bool banned; + bool banned:1; #define CONTEXT_SCORE_GUILTY 10 #define CONTEXT_SCORE_BAN_THRESHOLD 40 @@ -1459,8 +1454,6 @@ struct i915_gpu_error { /* For hangcheck timer */ #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) - /* Hang gpu twice in this window and your context gets banned */ -#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) struct delayed_work hangcheck_work; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4c4aed2d2afb..99198146ecc8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2623,20 +2623,13 @@ err_unlock: static bool i915_context_is_banned(const struct i915_gem_context *ctx) { const struct i915_ctx_hang_stats *hs = &ctx->hang_stats; - unsigned long elapsed; if (hs->banned) return true; - if (!hs->ban_period_seconds) + if (!hs->bannable) return false; - elapsed = get_seconds() - hs->guilty_ts; - if (elapsed <= hs->ban_period_seconds) { - DRM_DEBUG("context hanging too fast, banning!\n"); - return true; - } - if (hs->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD) { DRM_DEBUG("context hanging too often, banning!\n"); return true; @@ -2653,7 +2646,6 @@ static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) hs->banned = i915_context_is_banned(ctx); hs->batch_active++; - hs->guilty_ts = get_seconds(); } static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index d95dfec7166e..97012373a8d5 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -331,7 +331,7 @@ __create_hw_context(struct drm_device *dev, * is no remap info, it will 
be a NOP. */ ctx->remap_slice = ALL_L3_SLICES(dev_priv); - ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD; + ctx->hang_stats.bannable = true; ctx->ring_size = 4 * PAGE_SIZE; ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << GEN8_CTX_ADDRESSING_MODE_SHIFT; @@ -1085,7 +1085,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, args->size = 0; switch (args->param) { case I915_CONTEXT_PARAM_BAN_PERIOD: - args->value = ctx->hang_stats.ban_period_seconds; + ret = -EINVAL; break; case I915_CONTEXT_PARAM_NO_ZEROMAP: args->value = ctx->flags & CONTEXT_NO_ZEROMAP; @@ -1101,6 +1101,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE); break; + case I915_CONTEXT_PARAM_BANNABLE: + args->value = ctx->hang_stats.bannable; + break; default: ret = -EINVAL; break; @@ -1130,13 +1133,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, switch (args->param) { case I915_CONTEXT_PARAM_BAN_PERIOD: - if (args->size) - ret = -EINVAL; - else if (args->value < ctx->hang_stats.ban_period_seconds && - !capable(CAP_SYS_ADMIN)) - ret = -EPERM; - else - ctx->hang_stats.ban_period_seconds = args->value; + ret = -EINVAL; break; case I915_CONTEXT_PARAM_NO_ZEROMAP: if (args->size) { @@ -1156,6 +1153,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE; } break; + case I915_CONTEXT_PARAM_BANNABLE: + if (args->size) + ret = -EINVAL; + else if (!capable(CAP_SYS_ADMIN) && !args->value) + ret = -EPERM; + else + ctx->hang_stats.bannable = args->value; + break; default: ret = -EINVAL; break; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index d5a4ec9b507f..fa988a00ae68 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -352,8 +352,8 @@ static void error_print_request(struct drm_i915_error_state_buf *m, if (!erq->seqno) return; - err_printf(m, "%s pid %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n", - prefix, erq->pid, + err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n", + prefix, erq->pid, erq->ban_score, erq->context, erq->seqno, jiffies_to_msecs(jiffies - erq->jiffies), erq->head, erq->tail); @@ -1170,6 +1170,7 @@ static void record_request(struct drm_i915_gem_request *request, struct drm_i915_error_request *erq) { erq->context = request->ctx->hw_id; + erq->ban_score = request->ctx->hang_stats.ban_score; erq->seqno = request->global_seqno; erq->jiffies = request->emitted_jiffies; erq->head = request->head; diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 1c12a350eca3..12003f0d8c7f 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -1224,6 +1224,7 @@ struct drm_i915_gem_context_param { #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 #define I915_CONTEXT_PARAM_GTT_SIZE 0x3 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 +#define I915_CONTEXT_PARAM_BANNABLE 0x5 __u64 value; }; -- cgit v1.2.3 From b083a0870c7937e131762f8b702afa6bac8d08f4 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Fri, 18 Nov 2016 15:10:47 +0200 Subject: drm/i915: Add per client max context ban limit If we have a bad client submitting unfavourably across different contexts, creating new ones, the per context scoring of badness doesn't remove the root cause, the offending client. 
To counter, keep track of per client context bans. Deny access if client is responsible for more than 3 context bans in it's lifetime. v2: move ban check to context create ioctl (Chris) v3: add commentary about hangs needed to reach client ban (Chris) Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Mika Kuoppala --- drivers/gpu/drm/i915/i915_drv.h | 10 ++++++++++ drivers/gpu/drm/i915/i915_gem.c | 14 ++++++++++++++ drivers/gpu/drm/i915/i915_gem_context.c | 13 +++++++++++++ drivers/gpu/drm/i915/i915_gpu_error.c | 10 ++++++---- 4 files changed, 43 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7db7b37937e7..1f0b5c37f6de 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -416,6 +416,15 @@ struct drm_i915_file_private { } rps; unsigned int bsd_engine; + +/* Client can have a maximum of 3 contexts banned before + * it is denied of creating new contexts. As one context + * ban needs 4 consecutive hangs, and more if there is + * progress in between, this is a last resort stop gap measure + * to limit the badly behaving clients access to gpu. + */ +#define I915_MAX_CLIENT_CONTEXT_BANS 3 + int context_bans; }; /* Used by dp and fdi links */ @@ -872,6 +881,7 @@ struct drm_i915_error_state { pid_t pid; char comm[TASK_COMM_LEN]; + int context_bans; } engine[I915_NUM_ENGINES]; struct drm_i915_error_buffer { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 99198146ecc8..a8118386a23b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2646,6 +2646,20 @@ static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) hs->banned = i915_context_is_banned(ctx); hs->batch_active++; + + DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? 
%s\n", + ctx->name, hs->ban_score, yesno(hs->banned)); + + if (!ctx->file_priv) + return; + + if (hs->banned) { + ctx->file_priv->context_bans++; + + DRM_DEBUG_DRIVER("client %s has has %d context banned\n", + ctx->name, + ctx->file_priv->context_bans); + } } static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 97012373a8d5..88d59866c433 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -1003,6 +1003,11 @@ static bool contexts_enabled(struct drm_device *dev) return i915.enable_execlists || to_i915(dev)->hw_context_size; } +static bool client_is_banned(struct drm_i915_file_private *file_priv) +{ + return file_priv->context_bans > I915_MAX_CLIENT_CONTEXT_BANS; +} + int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { @@ -1017,6 +1022,14 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (args->pad != 0) return -EINVAL; + if (client_is_banned(file_priv)) { + DRM_DEBUG("client %s[%d] banned from creating ctx\n", + current->comm, + pid_nr(get_task_pid(current, PIDTYPE_PID))); + + return -EIO; + } + ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index fa988a00ae68..af4f0ef4fa08 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -538,10 +538,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, for (i = 0; i < ARRAY_SIZE(error->engine); i++) { if (error->engine[i].hangcheck_stalled && error->engine[i].pid != -1) { - err_printf(m, "Active process (on ring %s): %s [%d]\n", + err_printf(m, "Active process (on ring %s): %s [%d], context bans %d\n", engine_str(i), error->engine[i].comm, - error->engine[i].pid); + error->engine[i].pid, + error->engine[i].context_bans); } } err_printf(m, "Reset count: %u\n", error->reset_count); @@ -632,9 +633,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, if (obj) { err_puts(m, dev_priv->engine[i]->name); if (ee->pid != -1) - err_printf(m, " (submitted by %s [%d])", + err_printf(m, " (submitted by %s [%d], bans %d)", ee->comm, - ee->pid); + ee->pid, + ee->context_bans); err_printf(m, " --- gtt_offset = 0x%08x %08x\n", upper_32_bits(obj->gtt_offset), lower_32_bits(obj->gtt_offset)); -- cgit v1.2.3 From bc1d53c64773e89175766e8c77cc96aa0763de4a Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Wed, 16 Nov 2016 17:20:34 +0200 Subject: drm/i915: Wipe hang stats as an embedded struct Bannable property, banned status, guilty and active counts are properties of i915_gem_context. Make them so. 
v2: rebase Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Mika Kuoppala Link: http://patchwork.freedesktop.org/patch/msgid/1479309634-28574-1-git-send-email-mika.kuoppala@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 31 ++++++++++-------------------- drivers/gpu/drm/i915/i915_gem.c | 25 ++++++++++-------------- drivers/gpu/drm/i915/i915_gem_context.c | 12 +++++------- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4 +--- drivers/gpu/drm/i915/i915_gem_request.c | 4 ++-- drivers/gpu/drm/i915/i915_gpu_error.c | 2 +- 6 files changed, 29 insertions(+), 49 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1f0b5c37f6de..c7d5f7a30fe8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -913,25 +913,6 @@ enum i915_cache_level { I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */ }; -struct i915_ctx_hang_stats { - /* This context had batch pending when hang was declared */ - unsigned batch_pending; - - /* This context had batch active when hang was declared */ - unsigned batch_active; - - bool bannable:1; - - /* This context is banned to submit more work */ - bool banned:1; - -#define CONTEXT_SCORE_GUILTY 10 -#define CONTEXT_SCORE_BAN_THRESHOLD 40 - /* Accumulated score of hangs caused by this context */ - int ban_score; -}; - -/* This must match up with the value previously used for execbuf2.rsvd1. */ #define DEFAULT_CONTEXT_HANDLE 0 /** @@ -961,8 +942,6 @@ struct i915_gem_context { struct pid *pid; const char *name; - struct i915_ctx_hang_stats hang_stats; - unsigned long flags; #define CONTEXT_NO_ZEROMAP BIT(0) #define CONTEXT_NO_ERROR_CAPTURE BIT(1) @@ -991,6 +970,16 @@ struct i915_gem_context { u8 remap_slice; bool closed:1; + bool bannable:1; + bool banned:1; + + unsigned int guilty_count; /* guilty of a hang */ + unsigned int active_count; /* active during hang */ + +#define CONTEXT_SCORE_GUILTY 10 +#define CONTEXT_SCORE_BAN_THRESHOLD 40 + /* Accumulated score of hangs caused by this context */ + int ban_score; }; enum fb_op_origin { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a8118386a23b..b38d4f8d1875 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2622,15 +2622,13 @@ err_unlock: static bool i915_context_is_banned(const struct i915_gem_context *ctx) { - const struct i915_ctx_hang_stats *hs = &ctx->hang_stats; - - if (hs->banned) + if (ctx->banned) return true; - if (!hs->bannable) + if (!ctx->bannable) return false; - if (hs->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD) { + if (ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD) { DRM_DEBUG("context hanging too often, banning!\n"); return true; } @@ -2640,20 +2638,19 @@ static bool i915_context_is_banned(const struct i915_gem_context *ctx) static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) { - struct i915_ctx_hang_stats *hs = &ctx->hang_stats; - - hs->ban_score += CONTEXT_SCORE_GUILTY; + ctx->ban_score += CONTEXT_SCORE_GUILTY; - hs->banned = i915_context_is_banned(ctx); - hs->batch_active++; + ctx->banned = i915_context_is_banned(ctx); + ctx->guilty_count++; DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? 
%s\n", - ctx->name, hs->ban_score, yesno(hs->banned)); + ctx->name, ctx->ban_score, + yesno(ctx->banned)); if (!ctx->file_priv) return; - if (hs->banned) { + if (ctx->banned) { ctx->file_priv->context_bans++; DRM_DEBUG_DRIVER("client %s has has %d context banned\n", @@ -2664,9 +2661,7 @@ static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) { - struct i915_ctx_hang_stats *hs = &ctx->hang_stats; - - hs->batch_pending++; + ctx->active_count++; } struct drm_i915_gem_request * diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 88d59866c433..f82936a2fcce 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -331,7 +331,7 @@ __create_hw_context(struct drm_device *dev, * is no remap info, it will be a NOP. */ ctx->remap_slice = ALL_L3_SLICES(dev_priv); - ctx->hang_stats.bannable = true; + ctx->bannable = true; ctx->ring_size = 4 * PAGE_SIZE; ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << GEN8_CTX_ADDRESSING_MODE_SHIFT; @@ -1115,7 +1115,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE); break; case I915_CONTEXT_PARAM_BANNABLE: - args->value = ctx->hang_stats.bannable; + args->value = ctx->bannable; break; default: ret = -EINVAL; @@ -1172,7 +1172,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, else if (!capable(CAP_SYS_ADMIN) && !args->value) ret = -EPERM; else - ctx->hang_stats.bannable = args->value; + ctx->bannable = args->value; break; default: ret = -EINVAL; @@ -1188,7 +1188,6 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, { struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_reset_stats *args = data; - struct i915_ctx_hang_stats *hs; struct i915_gem_context *ctx; int ret; @@ -1207,15 +1206,14 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, mutex_unlock(&dev->struct_mutex); return PTR_ERR(ctx); } - hs = &ctx->hang_stats; if (capable(CAP_SYS_ADMIN)) args->reset_count = i915_reset_count(&dev_priv->gpu_error); else args->reset_count = 0; - args->batch_active = hs->batch_active; - args->batch_pending = hs->batch_pending; + args->batch_active = ctx->guilty_count; + args->batch_pending = ctx->active_count; mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 097d9d8c2315..522ecfb4dc9d 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1232,14 +1232,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, struct intel_engine_cs *engine, const u32 ctx_id) { struct i915_gem_context *ctx; - struct i915_ctx_hang_stats *hs; ctx = i915_gem_context_lookup(file->driver_priv, ctx_id); if (IS_ERR(ctx)) return ctx; - hs = &ctx->hang_stats; - if (hs->banned) { + if (ctx->banned) { DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id); return ERR_PTR(-EIO); } diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 60e63956ea19..9f37eaa3723a 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -264,8 +264,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) } /* Retirement decays the ban score as it is a sign of ctx progress */ - if 
(request->ctx->hang_stats.ban_score > 0) - request->ctx->hang_stats.ban_score--; + if (request->ctx->ban_score > 0) + request->ctx->ban_score--; i915_gem_context_put(request->ctx); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index af4f0ef4fa08..82458ea60150 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1172,7 +1172,7 @@ static void record_request(struct drm_i915_gem_request *request, struct drm_i915_error_request *erq) { erq->context = request->ctx->hw_id; - erq->ban_score = request->ctx->hang_stats.ban_score; + erq->ban_score = request->ctx->ban_score; erq->seqno = request->global_seqno; erq->jiffies = request->emitted_jiffies; erq->head = request->head; -- cgit v1.2.3 From eec688e1420da584afb36ffa5f0cad75f53cf286 Mon Sep 17 00:00:00 2001 From: Robert Bragg Date: Mon, 7 Nov 2016 19:49:47 +0000 Subject: drm/i915: Add i915 perf infrastructure Adds base i915 perf infrastructure for Gen performance metrics. This adds a DRM_IOCTL_I915_PERF_OPEN ioctl that takes an array of uint64 properties to configure a stream of metrics and returns a new fd usable with standard VFS system calls including read() to read typed and sized records; ioctl() to enable or disable capture and poll() to wait for data. A stream is opened something like: uint64_t properties[] = { /* Single context sampling */ DRM_I915_PERF_PROP_CTX_HANDLE, ctx_handle, /* Include OA reports in samples */ DRM_I915_PERF_PROP_SAMPLE_OA, true, /* OA unit configuration */ DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id, DRM_I915_PERF_PROP_OA_FORMAT, report_format, DRM_I915_PERF_PROP_OA_EXPONENT, period_exponent, }; struct drm_i915_perf_open_param parm = { .flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_FD_NONBLOCK | I915_PERF_FLAG_DISABLED, .properties_ptr = (uint64_t)properties, .num_properties = sizeof(properties) / 16, }; int fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, ¶m); Records read all start with a common { type, size } header with DRM_I915_PERF_RECORD_SAMPLE being of most interest. Sample records contain an extensible number of fields and it's the DRM_I915_PERF_PROP_SAMPLE_xyz properties given when opening that determine what's included in every sample. No specific streams are supported yet so any attempt to open a stream will return an error. 
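Once a stream type is actually implemented, consuming the fd might look roughly like the sketch below. This assumes the common { type, size } record header described above is exposed to userspace as struct drm_i915_perf_record_header, and consume_sample() is a hypothetical application handler:

   uint8_t buf[4096];
   int len = read(fd, buf, sizeof(buf));
   int offset = 0;

   while (offset < len) {
           /* every record, sample or otherwise, starts with the common header */
           const struct drm_i915_perf_record_header *header =
                   (const void *)(buf + offset);

           if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
                   consume_sample(header + 1, header->size - sizeof(*header));

           offset += header->size;
   }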
v2: use i915_gem_context_get() - Chris Wilson v3: update read() interface to avoid passing state struct - Chris Wilson fix some rebase fallout, with i915-perf init/deinit v4: s/DRM_IORW/DRM_IOW/ - Emil Velikov Signed-off-by: Robert Bragg Reviewed-by: Matthew Auld Reviewed-by: Sourab Gupta Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161107194957.3385-2-robert@sixbynine.org --- drivers/gpu/drm/i915/Makefile | 3 + drivers/gpu/drm/i915/i915_drv.c | 4 + drivers/gpu/drm/i915/i915_drv.h | 91 ++++++++ drivers/gpu/drm/i915/i915_perf.c | 449 +++++++++++++++++++++++++++++++++++++++ include/uapi/drm/i915_drm.h | 67 ++++++ 5 files changed, 614 insertions(+) create mode 100644 drivers/gpu/drm/i915/i915_perf.c (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 3dea46af9fe6..3e40d8f51f90 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -117,6 +117,9 @@ i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o # virtual gpu code i915-y += i915_vgpu.o +# perf code +i915-y += i915_perf.o + ifeq ($(CONFIG_DRM_I915_GVT),y) i915-y += intel_gvt.o include $(src)/gvt/Makefile diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 445fec9c2841..94a9fb0f7b39 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -848,6 +848,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, intel_detect_preproduction_hw(dev_priv); + i915_perf_init(dev_priv); + return 0; err_gvt: @@ -863,6 +865,7 @@ err_workqueues: */ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) { + i915_perf_fini(dev_priv); i915_gem_load_cleanup(&dev_priv->drm); i915_workqueues_cleanup(dev_priv); } @@ -2565,6 +2568,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW), }; static struct drm_driver driver = { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c7d5f7a30fe8..b69f8445af7e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1797,6 +1797,84 @@ struct intel_wm_config { bool sprites_scaled; }; +struct i915_perf_stream; + +struct i915_perf_stream_ops { + /* Enables the collection of HW samples, either in response to + * I915_PERF_IOCTL_ENABLE or implicitly called when stream is + * opened without I915_PERF_FLAG_DISABLED. + */ + void (*enable)(struct i915_perf_stream *stream); + + /* Disables the collection of HW samples, either in response to + * I915_PERF_IOCTL_DISABLE or implicitly called before + * destroying the stream. + */ + void (*disable)(struct i915_perf_stream *stream); + + /* Return: true if any i915 perf records are ready to read() + * for this stream. + */ + bool (*can_read)(struct i915_perf_stream *stream); + + /* Call poll_wait, passing a wait queue that will be woken + * once there is something ready to read() for the stream + */ + void (*poll_wait)(struct i915_perf_stream *stream, + struct file *file, + poll_table *wait); + + /* For handling a blocking read, wait until there is something + * to ready to read() for the stream. E.g. 
wait on the same + * wait queue that would be passed to poll_wait() until + * ->can_read() returns true (if its safe to call ->can_read() + * without the i915 perf lock held). + */ + int (*wait_unlocked)(struct i915_perf_stream *stream); + + /* read - Copy buffered metrics as records to userspace + * @buf: the userspace, destination buffer + * @count: the number of bytes to copy, requested by userspace + * @offset: zero at the start of the read, updated as the read + * proceeds, it represents how many bytes have been + * copied so far and the buffer offset for copying the + * next record. + * + * Copy as many buffered i915 perf samples and records for + * this stream to userspace as will fit in the given buffer. + * + * Only write complete records; returning -ENOSPC if there + * isn't room for a complete record. + * + * Return any error condition that results in a short read + * such as -ENOSPC or -EFAULT, even though these may be + * squashed before returning to userspace. + */ + int (*read)(struct i915_perf_stream *stream, + char __user *buf, + size_t count, + size_t *offset); + + /* Cleanup any stream specific resources. + * + * The stream will always be disabled before this is called. + */ + void (*destroy)(struct i915_perf_stream *stream); +}; + +struct i915_perf_stream { + struct drm_i915_private *dev_priv; + + struct list_head link; + + u32 sample_flags; + + struct i915_gem_context *ctx; + bool enabled; + + struct i915_perf_stream_ops *ops; +}; + struct drm_i915_private { struct drm_device drm; @@ -2092,6 +2170,12 @@ struct drm_i915_private { struct i915_runtime_pm pm; + struct { + bool initialized; + struct mutex lock; + struct list_head streams; + } perf; + /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ struct { void (*resume)(struct drm_i915_private *); @@ -3253,6 +3337,9 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +int i915_perf_open_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); + /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct i915_address_space *vm, u64 min_size, u64 alignment, @@ -3383,6 +3470,10 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, u32 batch_len, bool is_master); +/* i915_perf.c */ +extern void i915_perf_init(struct drm_i915_private *dev_priv); +extern void i915_perf_fini(struct drm_i915_private *dev_priv); + /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); extern int i915_restore_state(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c new file mode 100644 index 000000000000..777ce65f910d --- /dev/null +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -0,0 +1,449 @@ +/* + * Copyright © 2015-2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Robert Bragg + */ + +#include + +#include "i915_drv.h" + +struct perf_open_properties { + u32 sample_flags; + + u64 single_context:1; + u64 ctx_handle; +}; + +static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream, + struct file *file, + char __user *buf, + size_t count, + loff_t *ppos) +{ + /* Note we keep the offset (aka bytes read) separate from any + * error status so that the final check for whether we return + * the bytes read with a higher precedence than any error (see + * comment below) doesn't need to be handled/duplicated in + * stream->ops->read() implementations. + */ + size_t offset = 0; + int ret = stream->ops->read(stream, buf, count, &offset); + + /* If we've successfully copied any data then reporting that + * takes precedence over any internal error status, so the + * data isn't lost. + * + * For example ret will be -ENOSPC whenever there is more + * buffered data than can be copied to userspace, but that's + * only interesting if we weren't able to copy some data + * because it implies the userspace buffer is too small to + * receive a single record (and we never split records). + * + * Another case with ret == -EFAULT is more of a grey area + * since it would seem like bad form for userspace to ask us + * to overrun its buffer, but the user knows best: + * + * http://yarchive.net/comp/linux/partial_reads_writes.html + */ + return offset ?: (ret ?: -EAGAIN); +} + +static ssize_t i915_perf_read(struct file *file, + char __user *buf, + size_t count, + loff_t *ppos) +{ + struct i915_perf_stream *stream = file->private_data; + struct drm_i915_private *dev_priv = stream->dev_priv; + ssize_t ret; + + if (!(file->f_flags & O_NONBLOCK)) { + /* Allow false positives from stream->ops->wait_unlocked. 
+ */ + do { + ret = stream->ops->wait_unlocked(stream); + if (ret) + return ret; + + mutex_lock(&dev_priv->perf.lock); + ret = i915_perf_read_locked(stream, file, + buf, count, ppos); + mutex_unlock(&dev_priv->perf.lock); + } while (ret == -EAGAIN); + } else { + mutex_lock(&dev_priv->perf.lock); + ret = i915_perf_read_locked(stream, file, buf, count, ppos); + mutex_unlock(&dev_priv->perf.lock); + } + + return ret; +} + +static unsigned int i915_perf_poll_locked(struct i915_perf_stream *stream, + struct file *file, + poll_table *wait) +{ + unsigned int streams = 0; + + stream->ops->poll_wait(stream, file, wait); + + if (stream->ops->can_read(stream)) + streams |= POLLIN; + + return streams; +} + +static unsigned int i915_perf_poll(struct file *file, poll_table *wait) +{ + struct i915_perf_stream *stream = file->private_data; + struct drm_i915_private *dev_priv = stream->dev_priv; + int ret; + + mutex_lock(&dev_priv->perf.lock); + ret = i915_perf_poll_locked(stream, file, wait); + mutex_unlock(&dev_priv->perf.lock); + + return ret; +} + +static void i915_perf_enable_locked(struct i915_perf_stream *stream) +{ + if (stream->enabled) + return; + + /* Allow stream->ops->enable() to refer to this */ + stream->enabled = true; + + if (stream->ops->enable) + stream->ops->enable(stream); +} + +static void i915_perf_disable_locked(struct i915_perf_stream *stream) +{ + if (!stream->enabled) + return; + + /* Allow stream->ops->disable() to refer to this */ + stream->enabled = false; + + if (stream->ops->disable) + stream->ops->disable(stream); +} + +static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, + unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case I915_PERF_IOCTL_ENABLE: + i915_perf_enable_locked(stream); + return 0; + case I915_PERF_IOCTL_DISABLE: + i915_perf_disable_locked(stream); + return 0; + } + + return -EINVAL; +} + +static long i915_perf_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct i915_perf_stream *stream = file->private_data; + struct drm_i915_private *dev_priv = stream->dev_priv; + long ret; + + mutex_lock(&dev_priv->perf.lock); + ret = i915_perf_ioctl_locked(stream, cmd, arg); + mutex_unlock(&dev_priv->perf.lock); + + return ret; +} + +static void i915_perf_destroy_locked(struct i915_perf_stream *stream) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + + if (stream->enabled) + i915_perf_disable_locked(stream); + + if (stream->ops->destroy) + stream->ops->destroy(stream); + + list_del(&stream->link); + + if (stream->ctx) { + mutex_lock(&dev_priv->drm.struct_mutex); + i915_gem_context_put(stream->ctx); + mutex_unlock(&dev_priv->drm.struct_mutex); + } + + kfree(stream); +} + +static int i915_perf_release(struct inode *inode, struct file *file) +{ + struct i915_perf_stream *stream = file->private_data; + struct drm_i915_private *dev_priv = stream->dev_priv; + + mutex_lock(&dev_priv->perf.lock); + i915_perf_destroy_locked(stream); + mutex_unlock(&dev_priv->perf.lock); + + return 0; +} + + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .release = i915_perf_release, + .poll = i915_perf_poll, + .read = i915_perf_read, + .unlocked_ioctl = i915_perf_ioctl, +}; + + +static struct i915_gem_context * +lookup_context(struct drm_i915_private *dev_priv, + struct drm_i915_file_private *file_priv, + u32 ctx_user_handle) +{ + struct i915_gem_context *ctx; + int ret; + + ret = i915_mutex_lock_interruptible(&dev_priv->drm); + if (ret) + return ERR_PTR(ret); + + ctx = 
i915_gem_context_lookup(file_priv, ctx_user_handle); + if (!IS_ERR(ctx)) + i915_gem_context_get(ctx); + + mutex_unlock(&dev_priv->drm.struct_mutex); + + return ctx; +} + +static int +i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, + struct drm_i915_perf_open_param *param, + struct perf_open_properties *props, + struct drm_file *file) +{ + struct i915_gem_context *specific_ctx = NULL; + struct i915_perf_stream *stream = NULL; + unsigned long f_flags = 0; + int stream_fd; + int ret; + + if (props->single_context) { + u32 ctx_handle = props->ctx_handle; + struct drm_i915_file_private *file_priv = file->driver_priv; + + specific_ctx = lookup_context(dev_priv, file_priv, ctx_handle); + if (IS_ERR(specific_ctx)) { + ret = PTR_ERR(specific_ctx); + if (ret != -EINTR) + DRM_ERROR("Failed to look up context with ID %u for opening perf stream\n", + ctx_handle); + goto err; + } + } + + if (!specific_ctx && !capable(CAP_SYS_ADMIN)) { + DRM_ERROR("Insufficient privileges to open system-wide i915 perf stream\n"); + ret = -EACCES; + goto err_ctx; + } + + stream = kzalloc(sizeof(*stream), GFP_KERNEL); + if (!stream) { + ret = -ENOMEM; + goto err_ctx; + } + + stream->sample_flags = props->sample_flags; + stream->dev_priv = dev_priv; + stream->ctx = specific_ctx; + + /* + * TODO: support sampling something + * + * For now this is as far as we can go. + */ + DRM_ERROR("Unsupported i915 perf stream configuration\n"); + ret = -EINVAL; + goto err_alloc; + + list_add(&stream->link, &dev_priv->perf.streams); + + if (param->flags & I915_PERF_FLAG_FD_CLOEXEC) + f_flags |= O_CLOEXEC; + if (param->flags & I915_PERF_FLAG_FD_NONBLOCK) + f_flags |= O_NONBLOCK; + + stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags); + if (stream_fd < 0) { + ret = stream_fd; + goto err_open; + } + + if (!(param->flags & I915_PERF_FLAG_DISABLED)) + i915_perf_enable_locked(stream); + + return stream_fd; + +err_open: + list_del(&stream->link); + if (stream->ops->destroy) + stream->ops->destroy(stream); +err_alloc: + kfree(stream); +err_ctx: + if (specific_ctx) { + mutex_lock(&dev_priv->drm.struct_mutex); + i915_gem_context_put(specific_ctx); + mutex_unlock(&dev_priv->drm.struct_mutex); + } +err: + return ret; +} + +/* Note we copy the properties from userspace outside of the i915 perf + * mutex to avoid an awkward lockdep with mmap_sem. + * + * Note this function only validates properties in isolation it doesn't + * validate that the combination of properties makes sense or that all + * properties necessary for a particular kind of stream have been set. + */ +static int read_properties_unlocked(struct drm_i915_private *dev_priv, + u64 __user *uprops, + u32 n_props, + struct perf_open_properties *props) +{ + u64 __user *uprop = uprops; + int i; + + memset(props, 0, sizeof(struct perf_open_properties)); + + if (!n_props) { + DRM_ERROR("No i915 perf properties given"); + return -EINVAL; + } + + /* Considering that ID = 0 is reserved and assuming that we don't + * (currently) expect any configurations to ever specify duplicate + * values for a particular property ID then the last _PROP_MAX value is + * one greater than the maximum number of properties we expect to get + * from userspace. 
+ */ + if (n_props >= DRM_I915_PERF_PROP_MAX) { + DRM_ERROR("More i915 perf properties specified than exist"); + return -EINVAL; + } + + for (i = 0; i < n_props; i++) { + u64 id, value; + int ret; + + ret = get_user(id, uprop); + if (ret) + return ret; + + ret = get_user(value, uprop + 1); + if (ret) + return ret; + + switch ((enum drm_i915_perf_property_id)id) { + case DRM_I915_PERF_PROP_CTX_HANDLE: + props->single_context = 1; + props->ctx_handle = value; + break; + default: + MISSING_CASE(id); + DRM_ERROR("Unknown i915 perf property ID"); + return -EINVAL; + } + + uprop += 2; + } + + return 0; +} + +int i915_perf_open_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_perf_open_param *param = data; + struct perf_open_properties props; + u32 known_open_flags; + int ret; + + if (!dev_priv->perf.initialized) { + DRM_ERROR("i915 perf interface not available for this system"); + return -ENOTSUPP; + } + + known_open_flags = I915_PERF_FLAG_FD_CLOEXEC | + I915_PERF_FLAG_FD_NONBLOCK | + I915_PERF_FLAG_DISABLED; + if (param->flags & ~known_open_flags) { + DRM_ERROR("Unknown drm_i915_perf_open_param flag\n"); + return -EINVAL; + } + + ret = read_properties_unlocked(dev_priv, + u64_to_user_ptr(param->properties_ptr), + param->num_properties, + &props); + if (ret) + return ret; + + mutex_lock(&dev_priv->perf.lock); + ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file); + mutex_unlock(&dev_priv->perf.lock); + + return ret; +} + +void i915_perf_init(struct drm_i915_private *dev_priv) +{ + INIT_LIST_HEAD(&dev_priv->perf.streams); + mutex_init(&dev_priv->perf.lock); + + dev_priv->perf.initialized = true; +} + +void i915_perf_fini(struct drm_i915_private *dev_priv) +{ + if (!dev_priv->perf.initialized) + return; + + /* Currently nothing to clean up */ + + dev_priv->perf.initialized = false; +} diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 12003f0d8c7f..192dc1f8256b 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -258,6 +258,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_GEM_USERPTR 0x33 #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 +#define DRM_I915_PERF_OPEN 0x36 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -311,6 +312,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) +#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -1228,6 +1230,71 @@ struct drm_i915_gem_context_param { __u64 value; }; +enum drm_i915_perf_property_id { + /** + * Open the stream for a specific context handle (as used with + * execbuffer2). A stream opened for a specific context this way + * won't typically require root privileges. 
+ */ + DRM_I915_PERF_PROP_CTX_HANDLE = 1, + + DRM_I915_PERF_PROP_MAX /* non-ABI */ +}; + +struct drm_i915_perf_open_param { + __u32 flags; +#define I915_PERF_FLAG_FD_CLOEXEC (1<<0) +#define I915_PERF_FLAG_FD_NONBLOCK (1<<1) +#define I915_PERF_FLAG_DISABLED (1<<2) + + /** The number of u64 (id, value) pairs */ + __u32 num_properties; + + /** + * Pointer to array of u64 (id, value) pairs configuring the stream + * to open. + */ + __u64 __user properties_ptr; +}; + +#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0) +#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1) + +/** + * Common to all i915 perf records + */ +struct drm_i915_perf_record_header { + __u32 type; + __u16 pad; + __u16 size; +}; + +enum drm_i915_perf_record_type { + + /** + * Samples are the work horse record type whose contents are extensible + * and defined when opening an i915 perf stream based on the given + * properties. + * + * Boolean properties following the naming convention + * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in + * every sample. + * + * The order of these sample properties given by userspace has no + * affect on the ordering of data within a sample. The order will be + * documented here. + * + * struct { + * struct drm_i915_perf_record_header header; + * + * TODO: itemize extensible sample data here + * }; + */ + DRM_I915_PERF_RECORD_SAMPLE = 1, + + DRM_I915_PERF_RECORD_MAX /* non-ABI */ +}; + #if defined(__cplusplus) } #endif -- cgit v1.2.3 From a941795a3aead3765d6f9e12dc1828fe06ba3d5d Mon Sep 17 00:00:00 2001 From: Robert Bragg Date: Mon, 7 Nov 2016 19:49:48 +0000 Subject: drm/i915: rename OACONTROL GEN7_OACONTROL OACONTROL changes quite a bit for gen8, with some bits split out into a per-context OACTXCONTROL register. Rename now before adding more gen7 OA registers Signed-off-by: Robert Bragg Reviewed-by: Matthew Auld Reviewed-by: Sourab Gupta Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161107194957.3385-3-robert@sixbynine.org --- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- drivers/gpu/drm/i915/i915_cmd_parser.c | 4 ++-- drivers/gpu/drm/i915/i915_reg.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 522809710312..57fb8e3cbd1f 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2200,7 +2200,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL); - MMIO_D(OACONTROL, D_HSW); + MMIO_D(GEN7_OACONTROL, D_HSW); MMIO_D(0x2b00, D_BDW_PLUS); MMIO_D(0x2360, D_BDW_PLUS); MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL); diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index f5039f4f988f..7719aed0bc4e 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -450,7 +450,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG64(PS_INVOCATION_COUNT), REG64(PS_DEPTH_COUNT), REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE), - REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */ + REG32(GEN7_OACONTROL), /* Only allowed for LRI and SRM. See below. */ REG64(MI_PREDICATE_SRC0), REG64(MI_PREDICATE_SRC1), REG32(GEN7_3DPRIM_END_OFFSET), @@ -1108,7 +1108,7 @@ static bool check_cmd(const struct intel_engine_cs *engine, * to the register. 
Hence, limit OACONTROL writes to * only MI_LOAD_REGISTER_IMM commands. */ - if (reg_addr == i915_mmio_reg_offset(OACONTROL)) { + if (reg_addr == i915_mmio_reg_offset(GEN7_OACONTROL)) { if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n"); return false; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c70c07a7b586..ee8707343ae4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -615,7 +615,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define HSW_CS_GPR(n) _MMIO(0x2600 + (n) * 8) #define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4) -#define OACONTROL _MMIO(0x2360) +#define GEN7_OACONTROL _MMIO(0x2360) #define _GEN7_PIPEA_DE_LOAD_SL 0x70068 #define _GEN7_PIPEB_DE_LOAD_SL 0x71068 -- cgit v1.2.3 From 9bbeaedb664a6d3e1cfbe6b0c2f07bf667512456 Mon Sep 17 00:00:00 2001 From: Robert Bragg Date: Mon, 7 Nov 2016 19:49:49 +0000 Subject: drm/i915: return EACCES for check_cmd() failures check_cmd() is checking whether a command adheres to certain restrictions that ensure it's safe to execute within a privileged batch buffer. Returning false implies a privilege problem, not that the command is invalid. The distinction makes the difference between allowing the buffer to be executed as an unprivileged batch buffer or returning an EINVAL error to userspace without executing anything. In a case where userspace may want to test whether it can successfully write to a register that needs privileges the distinction may be important and an EINVAL error may be considered fatal. In particular this is currently true for Mesa, which includes a test for whether OACONTROL can be written too, but Mesa treats any error when flushing a batch buffer as fatal, calling exit(1). As it is currently Mesa can gracefully handle a failure to write to OACONTROL if the command parser is disabled, but if we were to remove OACONTROL from the parser's whitelist then the returned EINVAL would break Mesa applications as they attempt an OACONTROL write. This bumps the command parser version from 7 to 8, as the change is visible to userspace. Signed-off-by: Robert Bragg Reviewed-by: Matthew Auld Reviewed-by: Sourab Gupta Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161107194957.3385-4-robert@sixbynine.org --- drivers/gpu/drm/i915/i915_cmd_parser.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 7719aed0bc4e..c9d2ecd447c5 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1272,7 +1272,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, if (!check_cmd(engine, desc, cmd, length, is_master, &oacontrol_set)) { - ret = -EINVAL; + ret = -EACCES; break; } @@ -1333,6 +1333,9 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) * 5. GPGPU dispatch compute indirect registers. * 6. TIMESTAMP register and Haswell CS GPR registers * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers. + * 8. Don't report cmd_check() failures as EINVAL errors to userspace; + * rely on the HW to NOOP disallowed commands as it would without + * the parser enabled. 
*/ - return 7; + return 8; } -- cgit v1.2.3 From 10ff401df041e57afc2b1619cd252b86d0ae5f30 Mon Sep 17 00:00:00 2001 From: Robert Bragg Date: Tue, 8 Nov 2016 12:51:48 +0000 Subject: drm/i915: don't whitelist oacontrol in cmd parser Being able to program OACONTROL from a non-privileged batch buffer is not sufficient to be able to configure the OA unit. This was originally allowed to help enable Mesa to expose OA counters via the INTEL_performance_query extension, but the current implementation based on programming OACONTROL via a batch buffer isn't able to report useable data without a more complete OA unit configuration. Mesa handles the possibility that writes to OACONTROL may not be allowed and so only advertises the extension after explicitly testing that a write to OACONTROL succeeds. Based on this; removing OACONTROL from the whitelist should be ok for userspace. Removing this simplifies adding a new kernel api for configuring the OA unit without needing to consider the possibility that userspace might trample on OACONTROL state which we'd like to start managing within the kernel instead. In particular running any Mesa based GL application currently results in clearing OACONTROL when initializing which would disable the capturing of metrics. v2: This bumps the command parser version from 8 to 9, as the change is visible to userspace. Signed-off-by: Robert Bragg Reviewed-by: Matthew Auld Reviewed-by: Sourab Gupta Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161108125148.25007-1-robert@sixbynine.org --- drivers/gpu/drm/i915/i915_cmd_parser.c | 42 ++++------------------------------ 1 file changed, 5 insertions(+), 37 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index c9d2ecd447c5..f5762cde288b 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -450,7 +450,6 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG64(PS_INVOCATION_COUNT), REG64(PS_DEPTH_COUNT), REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE), - REG32(GEN7_OACONTROL), /* Only allowed for LRI and SRM. See below. */ REG64(MI_PREDICATE_SRC0), REG64(MI_PREDICATE_SRC1), REG32(GEN7_3DPRIM_END_OFFSET), @@ -1060,8 +1059,7 @@ bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine) static bool check_cmd(const struct intel_engine_cs *engine, const struct drm_i915_cmd_descriptor *desc, const u32 *cmd, u32 length, - const bool is_master, - bool *oacontrol_set) + const bool is_master) { if (desc->flags & CMD_DESC_SKIP) return true; @@ -1098,31 +1096,6 @@ static bool check_cmd(const struct intel_engine_cs *engine, return false; } - /* - * OACONTROL requires some special handling for - * writes. We want to make sure that any batch which - * enables OA also disables it before the end of the - * batch. The goal is to prevent one process from - * snooping on the perf data from another process. To do - * that, we need to check the value that will be written - * to the register. Hence, limit OACONTROL writes to - * only MI_LOAD_REGISTER_IMM commands. 
- */ - if (reg_addr == i915_mmio_reg_offset(GEN7_OACONTROL)) { - if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { - DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n"); - return false; - } - - if (desc->cmd.value == MI_LOAD_REGISTER_REG) { - DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n"); - return false; - } - - if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1)) - *oacontrol_set = (cmd[offset + 1] != 0); - } - /* * Check the value written to the register against the * allowed mask/value pair given in the whitelist entry. @@ -1214,7 +1187,6 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, u32 *cmd, *batch_end; struct drm_i915_cmd_descriptor default_desc = noop_desc; const struct drm_i915_cmd_descriptor *desc = &default_desc; - bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */ bool needs_clflush_after = false; int ret = 0; @@ -1270,8 +1242,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, break; } - if (!check_cmd(engine, desc, cmd, length, is_master, - &oacontrol_set)) { + if (!check_cmd(engine, desc, cmd, length, is_master)) { ret = -EACCES; break; } @@ -1279,11 +1250,6 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, cmd += length; } - if (oacontrol_set) { - DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n"); - ret = -EINVAL; - } - if (cmd >= batch_end) { DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n"); ret = -EINVAL; @@ -1336,6 +1302,8 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) * 8. Don't report cmd_check() failures as EINVAL errors to userspace; * rely on the HW to NOOP disallowed commands as it would without * the parser enabled. + * 9. Don't whitelist or handle oacontrol specially, as ownership + * for oacontrol state is moving to i915-perf. */ - return 8; + return 9; } -- cgit v1.2.3 From 8a3003dd93e2db2e7c8edc316e2978f750ca0dc2 Mon Sep 17 00:00:00 2001 From: Robert Bragg Date: Mon, 7 Nov 2016 19:49:51 +0000 Subject: drm/i915: Add 'render basic' Haswell OA unit config Adds a static OA unit, MUX + B Counter configuration for basic render metrics on Haswell. 
This is auto generated from an XML description of metric sets, currently maintained in gputop, ref: https://github.com/rib/gputop > gputop-data/oa-*.xml > scripts/i915-perf-kernelgen.py $ make -C gputop-data -f Makefile.xml SYSFS=0 WHITELIST=RenderBasic Signed-off-by: Robert Bragg Reviewed-by: Matthew Auld Reviewed-by: Sourab Gupta Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161107194957.3385-6-robert@sixbynine.org --- drivers/gpu/drm/i915/Makefile | 3 +- drivers/gpu/drm/i915/i915_drv.h | 14 ++++ drivers/gpu/drm/i915/i915_oa_hsw.c | 144 +++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_oa_hsw.h | 34 +++++++++ 4 files changed, 194 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/i915/i915_oa_hsw.c create mode 100644 drivers/gpu/drm/i915/i915_oa_hsw.h (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 3e40d8f51f90..580602da899a 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -118,7 +118,8 @@ i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o i915-y += i915_vgpu.o # perf code -i915-y += i915_perf.o +i915-y += i915_perf.o \ + i915_oa_hsw.o ifeq ($(CONFIG_DRM_I915_GVT),y) i915-y += intel_gvt.o diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b69f8445af7e..b11506f65936 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1797,6 +1797,11 @@ struct intel_wm_config { bool sprites_scaled; }; +struct i915_oa_reg { + i915_reg_t addr; + u32 value; +}; + struct i915_perf_stream; struct i915_perf_stream_ops { @@ -2174,6 +2179,15 @@ struct drm_i915_private { bool initialized; struct mutex lock; struct list_head streams; + + struct { + u32 metrics_set; + + const struct i915_oa_reg *mux_regs; + int mux_regs_len; + const struct i915_oa_reg *b_counter_regs; + int b_counter_regs_len; + } oa; } perf; /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c new file mode 100644 index 000000000000..8906380d9177 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_oa_hsw.c @@ -0,0 +1,144 @@ +/* + * Autogenerated file, DO NOT EDIT manually! + * + * Copyright (c) 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include "i915_drv.h" +#include "i915_oa_hsw.h" + +enum metric_set_id { + METRIC_SET_ID_RENDER_BASIC = 1, +}; + +int i915_oa_n_builtin_metric_sets_hsw = 1; + +static const struct i915_oa_reg b_counter_config_render_basic[] = { + { _MMIO(0x2724), 0x00800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2714), 0x00800000 }, + { _MMIO(0x2710), 0x00000000 }, +}; + +static const struct i915_oa_reg mux_config_render_basic[] = { + { _MMIO(0x253a4), 0x01600000 }, + { _MMIO(0x25440), 0x00100000 }, + { _MMIO(0x25128), 0x00000000 }, + { _MMIO(0x2691c), 0x00000800 }, + { _MMIO(0x26aa0), 0x01500000 }, + { _MMIO(0x26b9c), 0x00006000 }, + { _MMIO(0x2791c), 0x00000800 }, + { _MMIO(0x27aa0), 0x01500000 }, + { _MMIO(0x27b9c), 0x00006000 }, + { _MMIO(0x2641c), 0x00000400 }, + { _MMIO(0x25380), 0x00000010 }, + { _MMIO(0x2538c), 0x00000000 }, + { _MMIO(0x25384), 0x0800aaaa }, + { _MMIO(0x25400), 0x00000004 }, + { _MMIO(0x2540c), 0x06029000 }, + { _MMIO(0x25410), 0x00000002 }, + { _MMIO(0x25404), 0x5c30ffff }, + { _MMIO(0x25100), 0x00000016 }, + { _MMIO(0x25110), 0x00000400 }, + { _MMIO(0x25104), 0x00000000 }, + { _MMIO(0x26804), 0x00001211 }, + { _MMIO(0x26884), 0x00000100 }, + { _MMIO(0x26900), 0x00000002 }, + { _MMIO(0x26908), 0x00700000 }, + { _MMIO(0x26904), 0x00000000 }, + { _MMIO(0x26984), 0x00001022 }, + { _MMIO(0x26a04), 0x00000011 }, + { _MMIO(0x26a80), 0x00000006 }, + { _MMIO(0x26a88), 0x00000c02 }, + { _MMIO(0x26a84), 0x00000000 }, + { _MMIO(0x26b04), 0x00001000 }, + { _MMIO(0x26b80), 0x00000002 }, + { _MMIO(0x26b8c), 0x00000007 }, + { _MMIO(0x26b84), 0x00000000 }, + { _MMIO(0x27804), 0x00004844 }, + { _MMIO(0x27884), 0x00000400 }, + { _MMIO(0x27900), 0x00000002 }, + { _MMIO(0x27908), 0x0e000000 }, + { _MMIO(0x27904), 0x00000000 }, + { _MMIO(0x27984), 0x00004088 }, + { _MMIO(0x27a04), 0x00000044 }, + { _MMIO(0x27a80), 0x00000006 }, + { _MMIO(0x27a88), 0x00018040 }, + { _MMIO(0x27a84), 0x00000000 }, + { _MMIO(0x27b04), 0x00004000 }, + { _MMIO(0x27b80), 0x00000002 }, + { _MMIO(0x27b8c), 0x000000e0 }, + { _MMIO(0x27b84), 0x00000000 }, + { _MMIO(0x26104), 0x00002222 }, + { _MMIO(0x26184), 0x0c006666 }, + { _MMIO(0x26284), 0x04000000 }, + { _MMIO(0x26304), 0x04000000 }, + { _MMIO(0x26400), 0x00000002 }, + { _MMIO(0x26410), 0x000000a0 }, + { _MMIO(0x26404), 0x00000000 }, + { _MMIO(0x25420), 0x04108020 }, + { _MMIO(0x25424), 0x1284a420 }, + { _MMIO(0x2541c), 0x00000000 }, + { _MMIO(0x25428), 0x00042049 }, +}; + +static const struct i915_oa_reg * +get_render_basic_mux_config(struct drm_i915_private *dev_priv, + int *len) +{ + *len = ARRAY_SIZE(mux_config_render_basic); + return mux_config_render_basic; +} + +int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv) +{ + dev_priv->perf.oa.mux_regs = NULL; + dev_priv->perf.oa.mux_regs_len = 0; + dev_priv->perf.oa.b_counter_regs = NULL; + dev_priv->perf.oa.b_counter_regs_len = 0; + + switch (dev_priv->perf.oa.metrics_set) { + case METRIC_SET_ID_RENDER_BASIC: + dev_priv->perf.oa.mux_regs = + get_render_basic_mux_config(dev_priv, + &dev_priv->perf.oa.mux_regs_len); + if (!dev_priv->perf.oa.mux_regs) { + DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set"); + + /* EINVAL because *_register_sysfs already checked this + * and so it wouldn't have been advertised so userspace and + * so shouldn't have been requested + */ + return -EINVAL; + } + + dev_priv->perf.oa.b_counter_regs = + b_counter_config_render_basic; + dev_priv->perf.oa.b_counter_regs_len = + ARRAY_SIZE(b_counter_config_render_basic); + + return 0; + 
default: + return -ENODEV; + } +} diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h new file mode 100644 index 000000000000..b618a1f0aa94 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_oa_hsw.h @@ -0,0 +1,34 @@ +/* + * Autogenerated file, DO NOT EDIT manually! + * + * Copyright (c) 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __I915_OA_HSW_H__ +#define __I915_OA_HSW_H__ + +extern int i915_oa_n_builtin_metric_sets_hsw; + +extern int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv); + +#endif -- cgit v1.2.3 From d79651522e89c4ffa8992b48dfe449f0c583f809 Mon Sep 17 00:00:00 2001 From: Robert Bragg Date: Mon, 7 Nov 2016 19:49:52 +0000 Subject: drm/i915: Enable i915 perf stream for Haswell OA unit Gen graphics hardware can be set up to periodically write snapshots of performance counters into a circular buffer via its Observation Architecture and this patch exposes that capability to userspace via the i915 perf interface. v2: Make sure to initialize ->specific_ctx_id when opening, without relying on _pin_notify hook, in case ctx already pinned. v3: Revert back to pinning ctx upfront when opening stream, removing need to hook in to pinning and to update OACONTROL on the fly. Signed-off-by: Robert Bragg Signed-off-by: Zhenyu Wang Cc: Chris Wilson Reviewed-by: Matthew Auld Reviewed-by: Sourab Gupta Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161107194957.3385-7-robert@sixbynine.org --- drivers/gpu/drm/i915/i915_drv.h | 66 ++- drivers/gpu/drm/i915/i915_perf.c | 1036 +++++++++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/i915_reg.h | 338 +++++++++++++ include/uapi/drm/i915_drm.h | 71 ++- 4 files changed, 1482 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b11506f65936..3a69e219c7a2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1797,6 +1797,11 @@ struct intel_wm_config { bool sprites_scaled; }; +struct i915_oa_format { + u32 format; + int size; +}; + struct i915_oa_reg { i915_reg_t addr; u32 value; @@ -1817,11 +1822,6 @@ struct i915_perf_stream_ops { */ void (*disable)(struct i915_perf_stream *stream); - /* Return: true if any i915 perf records are ready to read() - * for this stream. 
- */ - bool (*can_read)(struct i915_perf_stream *stream); - /* Call poll_wait, passing a wait queue that will be woken * once there is something ready to read() for the stream */ @@ -1831,9 +1831,7 @@ struct i915_perf_stream_ops { /* For handling a blocking read, wait until there is something * to ready to read() for the stream. E.g. wait on the same - * wait queue that would be passed to poll_wait() until - * ->can_read() returns true (if its safe to call ->can_read() - * without the i915 perf lock held). + * wait queue that would be passed to poll_wait(). */ int (*wait_unlocked)(struct i915_perf_stream *stream); @@ -1873,11 +1871,28 @@ struct i915_perf_stream { struct list_head link; u32 sample_flags; + int sample_size; struct i915_gem_context *ctx; bool enabled; - struct i915_perf_stream_ops *ops; + const struct i915_perf_stream_ops *ops; +}; + +struct i915_oa_ops { + void (*init_oa_buffer)(struct drm_i915_private *dev_priv); + int (*enable_metric_set)(struct drm_i915_private *dev_priv); + void (*disable_metric_set)(struct drm_i915_private *dev_priv); + void (*oa_enable)(struct drm_i915_private *dev_priv); + void (*oa_disable)(struct drm_i915_private *dev_priv); + void (*update_oacontrol)(struct drm_i915_private *dev_priv); + void (*update_hw_ctx_id_locked)(struct drm_i915_private *dev_priv, + u32 ctx_id); + int (*read)(struct i915_perf_stream *stream, + char __user *buf, + size_t count, + size_t *offset); + bool (*oa_buffer_is_empty)(struct drm_i915_private *dev_priv); }; struct drm_i915_private { @@ -2177,16 +2192,47 @@ struct drm_i915_private { struct { bool initialized; + struct mutex lock; struct list_head streams; + spinlock_t hook_lock; + struct { - u32 metrics_set; + struct i915_perf_stream *exclusive_stream; + + u32 specific_ctx_id; + struct i915_vma *pinned_rcs_vma; + + struct hrtimer poll_check_timer; + wait_queue_head_t poll_wq; + bool pollin; + + bool periodic; + int period_exponent; + int timestamp_frequency; + + int tail_margin; + + int metrics_set; const struct i915_oa_reg *mux_regs; int mux_regs_len; const struct i915_oa_reg *b_counter_regs; int b_counter_regs_len; + + struct { + struct i915_vma *vma; + u8 *vaddr; + int format; + int format_size; + } oa_buffer; + + u32 gen7_latched_oastatus1; + + struct i915_oa_ops ops; + const struct i915_oa_format *oa_formats; + int n_builtin_sets; } oa; } perf; diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 777ce65f910d..54653bcf0d79 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -25,16 +25,899 @@ */ #include +#include #include "i915_drv.h" +#include "i915_oa_hsw.h" + +/* HW requires this to be a power of two, between 128k and 16M, though driver + * is currently generally designed assuming the largest 16M size is used such + * that the overflow cases are unlikely in normal operation. + */ +#define OA_BUFFER_SIZE SZ_16M + +#define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1)) + +/* There's a HW race condition between OA unit tail pointer register updates and + * writes to memory whereby the tail pointer can sometimes get ahead of what's + * been written out to the OA buffer so far. + * + * Although this can be observed explicitly by checking for a zeroed report-id + * field in tail reports, it seems preferable to account for this earlier e.g. + * as part of the _oa_buffer_is_empty checks to minimize -EAGAIN polling cycles + * in this situation. 
+ * + * To give time for the most recent reports to land before they may be copied to + * userspace, the driver operates as if the tail pointer effectively lags behind + * the HW tail pointer by 'tail_margin' bytes. The margin in bytes is calculated + * based on this constant in nanoseconds, the current OA sampling exponent + * and current report size. + * + * There is also a fallback check while reading to simply skip over reports with + * a zeroed report-id. + */ +#define OA_TAIL_MARGIN_NSEC 100000ULL + +/* frequency for checking whether the OA unit has written new reports to the + * circular OA buffer... + */ +#define POLL_FREQUENCY 200 +#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY) + +/* The maximum exponent the hardware accepts is 63 (essentially it selects one + * of the 64bit timestamp bits to trigger reports from) but there's currently + * no known use case for sampling as infrequently as once per 47 thousand years. + * + * Since the timestamps included in OA reports are only 32bits it seems + * reasonable to limit the OA exponent where it's still possible to account for + * overflow in OA report timestamps. + */ +#define OA_EXPONENT_MAX 31 + +#define INVALID_CTX_ID 0xffffffff + + +/* XXX: beware if future OA HW adds new report formats that the current + * code assumes all reports have a power-of-two size and ~(size - 1) can + * be used as a mask to align the OA tail pointer. + */ +static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = { + [I915_OA_FORMAT_A13] = { 0, 64 }, + [I915_OA_FORMAT_A29] = { 1, 128 }, + [I915_OA_FORMAT_A13_B8_C8] = { 2, 128 }, + /* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */ + [I915_OA_FORMAT_B4_C8] = { 4, 64 }, + [I915_OA_FORMAT_A45_B8_C8] = { 5, 256 }, + [I915_OA_FORMAT_B4_C8_A16] = { 6, 128 }, + [I915_OA_FORMAT_C4_B8] = { 7, 64 }, +}; + +#define SAMPLE_OA_REPORT (1<<0) struct perf_open_properties { u32 sample_flags; u64 single_context:1; u64 ctx_handle; + + /* OA sampling state */ + int metrics_set; + int oa_format; + bool oa_periodic; + int oa_period_exponent; +}; + +/* NB: This is either called via fops or the poll check hrtimer (atomic ctx) + * + * It's safe to read OA config state here unlocked, assuming that this is only + * called while the stream is enabled, while the global OA configuration can't + * be modified. + * + * Note: we don't lock around the head/tail reads even though there's the slim + * possibility of read() fop errors forcing a re-init of the OA buffer + * pointers. A race here could result in a false positive !empty status which + * is acceptable. + */ +static bool gen7_oa_buffer_is_empty_fop_unlocked(struct drm_i915_private *dev_priv) +{ + int report_size = dev_priv->perf.oa.oa_buffer.format_size; + u32 oastatus2 = I915_READ(GEN7_OASTATUS2); + u32 oastatus1 = I915_READ(GEN7_OASTATUS1); + u32 head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK; + u32 tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK; + + return OA_TAKEN(tail, head) < + dev_priv->perf.oa.tail_margin + report_size; +} + +/** + * Appends a status record to a userspace read() buffer. 
+ */ +static int append_oa_status(struct i915_perf_stream *stream, + char __user *buf, + size_t count, + size_t *offset, + enum drm_i915_perf_record_type type) +{ + struct drm_i915_perf_record_header header = { type, 0, sizeof(header) }; + + if ((count - *offset) < header.size) + return -ENOSPC; + + if (copy_to_user(buf + *offset, &header, sizeof(header))) + return -EFAULT; + + (*offset) += header.size; + + return 0; +} + +/** + * Copies single OA report into userspace read() buffer. + */ +static int append_oa_sample(struct i915_perf_stream *stream, + char __user *buf, + size_t count, + size_t *offset, + const u8 *report) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + int report_size = dev_priv->perf.oa.oa_buffer.format_size; + struct drm_i915_perf_record_header header; + u32 sample_flags = stream->sample_flags; + + header.type = DRM_I915_PERF_RECORD_SAMPLE; + header.pad = 0; + header.size = stream->sample_size; + + if ((count - *offset) < header.size) + return -ENOSPC; + + buf += *offset; + if (copy_to_user(buf, &header, sizeof(header))) + return -EFAULT; + buf += sizeof(header); + + if (sample_flags & SAMPLE_OA_REPORT) { + if (copy_to_user(buf, report, report_size)) + return -EFAULT; + } + + (*offset) += header.size; + + return 0; +} + +/** + * Copies all buffered OA reports into userspace read() buffer. + * @stream: An i915-perf stream opened for OA metrics + * @buf: destination buffer given by userspace + * @count: the number of bytes userspace wants to read + * @offset: (inout): the current position for writing into @buf + * @head_ptr: (inout): the current oa buffer cpu read position + * @tail: the current oa buffer gpu write position + * + * Returns 0 on success, negative error code on failure. + * + * Notably any error condition resulting in a short read (-ENOSPC or + * -EFAULT) will be returned even though one or more records may + * have been successfully copied. In this case it's up to the caller + * to decide if the error should be squashed before returning to + * userspace. + * + * Note: reports are consumed from the head, and appended to the + * tail, so the head chases the tail?... If you think that's mad + * and back-to-front you're not alone, but this follows the + * Gen PRM naming convention. + */ +static int gen7_append_oa_reports(struct i915_perf_stream *stream, + char __user *buf, + size_t count, + size_t *offset, + u32 *head_ptr, + u32 tail) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + int report_size = dev_priv->perf.oa.oa_buffer.format_size; + u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr; + int tail_margin = dev_priv->perf.oa.tail_margin; + u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); + u32 mask = (OA_BUFFER_SIZE - 1); + u32 head; + u32 taken; + int ret = 0; + + if (WARN_ON(!stream->enabled)) + return -EIO; + + head = *head_ptr - gtt_offset; + tail -= gtt_offset; + + /* The OA unit is expected to wrap the tail pointer according to the OA + * buffer size and since we should never write a misaligned head + * pointer we don't expect to read one back either... + */ + if (tail > OA_BUFFER_SIZE || head > OA_BUFFER_SIZE || + head % report_size) { + DRM_ERROR("Inconsistent OA buffer pointer (head = %u, tail = %u): force restart\n", + head, tail); + dev_priv->perf.oa.ops.oa_disable(dev_priv); + dev_priv->perf.oa.ops.oa_enable(dev_priv); + *head_ptr = I915_READ(GEN7_OASTATUS2) & + GEN7_OASTATUS2_HEAD_MASK; + return -EIO; + } + + + /* The tail pointer increases in 64 byte increments, not in report_size + * steps... 
+ */ + tail &= ~(report_size - 1); + + /* Move the tail pointer back by the current tail_margin to account for + * the possibility that the latest reports may not have really landed + * in memory yet... + */ + + if (OA_TAKEN(tail, head) < report_size + tail_margin) + return -EAGAIN; + + tail -= tail_margin; + tail &= mask; + + for (/* none */; + (taken = OA_TAKEN(tail, head)); + head = (head + report_size) & mask) { + u8 *report = oa_buf_base + head; + u32 *report32 = (void *)report; + + /* All the report sizes factor neatly into the buffer + * size so we never expect to see a report split + * between the beginning and end of the buffer. + * + * Given the initial alignment check a misalignment + * here would imply a driver bug that would result + * in an overrun. + */ + if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) { + DRM_ERROR("Spurious OA head ptr: non-integral report offset\n"); + break; + } + + /* The report-ID field for periodic samples includes + * some undocumented flags related to what triggered + * the report and is never expected to be zero so we + * can check that the report isn't invalid before + * copying it to userspace... + */ + if (report32[0] == 0) { + DRM_ERROR("Skipping spurious, invalid OA report\n"); + continue; + } + + ret = append_oa_sample(stream, buf, count, offset, report); + if (ret) + break; + + /* The above report-id field sanity check is based on + * the assumption that the OA buffer is initially + * zeroed and we reset the field after copying so the + * check is still meaningful once old reports start + * being overwritten. + */ + report32[0] = 0; + } + + *head_ptr = gtt_offset + head; + + return ret; +} + +static int gen7_oa_read(struct i915_perf_stream *stream, + char __user *buf, + size_t count, + size_t *offset) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + int report_size = dev_priv->perf.oa.oa_buffer.format_size; + u32 oastatus2; + u32 oastatus1; + u32 head; + u32 tail; + int ret; + + if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr)) + return -EIO; + + oastatus2 = I915_READ(GEN7_OASTATUS2); + oastatus1 = I915_READ(GEN7_OASTATUS1); + + head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK; + tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK; + + /* XXX: On Haswell we don't have a safe way to clear oastatus1 + * bits while the OA unit is enabled (while the tail pointer + * may be updated asynchronously) so we ignore status bits + * that have already been reported to userspace. + */ + oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1; + + /* We treat OABUFFER_OVERFLOW as a significant error: + * + * - The status can be interpreted to mean that the buffer is + * currently full (with a higher precedence than OA_TAKEN() + * which will start to report a near-empty buffer after an + * overflow) but it's awkward that we can't clear the status + * on Haswell, so without a reset we won't be able to catch + * the state again. + * + * - Since it also implies the HW has started overwriting old + * reports it may also affect our sanity checks for invalid + * reports when copying to userspace that assume new reports + * are being written to cleared memory. + * + * - In the future we may want to introduce a flight recorder + * mode where the driver will automatically maintain a safe + * guard band between head/tail, avoiding this overflow + * condition, but we avoid the added driver complexity for + * now. 
+ */ + if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) { + ret = append_oa_status(stream, buf, count, offset, + DRM_I915_PERF_RECORD_OA_BUFFER_LOST); + if (ret) + return ret; + + DRM_ERROR("OA buffer overflow: force restart\n"); + + dev_priv->perf.oa.ops.oa_disable(dev_priv); + dev_priv->perf.oa.ops.oa_enable(dev_priv); + + oastatus2 = I915_READ(GEN7_OASTATUS2); + oastatus1 = I915_READ(GEN7_OASTATUS1); + + head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK; + tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK; + } + + if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) { + ret = append_oa_status(stream, buf, count, offset, + DRM_I915_PERF_RECORD_OA_REPORT_LOST); + if (ret) + return ret; + dev_priv->perf.oa.gen7_latched_oastatus1 |= + GEN7_OASTATUS1_REPORT_LOST; + } + + ret = gen7_append_oa_reports(stream, buf, count, offset, + &head, tail); + + /* All the report sizes are a power of two and the + * head should always be incremented by some multiple + * of the report size. + * + * A warning here, but notably if we later read back a + * misaligned pointer we will treat that as a bug since + * it could lead to a buffer overrun. + */ + WARN_ONCE(head & (report_size - 1), + "i915: Writing misaligned OA head pointer"); + + /* Note: we update the head pointer here even if an error + * was returned since the error may represent a short read + * where some some reports were successfully copied. + */ + I915_WRITE(GEN7_OASTATUS2, + ((head & GEN7_OASTATUS2_HEAD_MASK) | + OA_MEM_SELECT_GGTT)); + + return ret; +} + +static int i915_oa_wait_unlocked(struct i915_perf_stream *stream) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + + /* We would wait indefinitely if periodic sampling is not enabled */ + if (!dev_priv->perf.oa.periodic) + return -EIO; + + /* Note: the oa_buffer_is_empty() condition is ok to run unlocked as it + * just performs mmio reads of the OA buffer head + tail pointers and + * it's assumed we're handling some operation that implies the stream + * can't be destroyed until completion (such as a read()) that ensures + * the device + OA buffer can't disappear + */ + return wait_event_interruptible(dev_priv->perf.oa.poll_wq, + !dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv)); +} + +static void i915_oa_poll_wait(struct i915_perf_stream *stream, + struct file *file, + poll_table *wait) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + + poll_wait(file, &dev_priv->perf.oa.poll_wq, wait); +} + +static int i915_oa_read(struct i915_perf_stream *stream, + char __user *buf, + size_t count, + size_t *offset) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + + return dev_priv->perf.oa.ops.read(stream, buf, count, offset); +} + +/* Determine the render context hw id, and ensure it remains fixed for the + * lifetime of the stream. This ensures that we don't have to worry about + * updating the context ID in OACONTROL on the fly. + */ +static int oa_get_render_ctx_id(struct i915_perf_stream *stream) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + struct i915_vma *vma; + int ret; + + ret = i915_mutex_lock_interruptible(&dev_priv->drm); + if (ret) + return ret; + + /* As the ID is the gtt offset of the context's vma we pin + * the vma to ensure the ID remains fixed. + * + * NB: implied RCS engine... 
+ */ + vma = i915_gem_context_pin_legacy(stream->ctx, 0); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto unlock; + } + + dev_priv->perf.oa.pinned_rcs_vma = vma; + + /* Explicitly track the ID (instead of calling i915_ggtt_offset() + * on the fly) considering the difference with gen8+ and + * execlists + */ + dev_priv->perf.oa.specific_ctx_id = i915_ggtt_offset(vma); + +unlock: + mutex_unlock(&dev_priv->drm.struct_mutex); + + return ret; +} + +static void oa_put_render_ctx_id(struct i915_perf_stream *stream) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + + mutex_lock(&dev_priv->drm.struct_mutex); + + i915_vma_unpin(dev_priv->perf.oa.pinned_rcs_vma); + dev_priv->perf.oa.pinned_rcs_vma = NULL; + + dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; + + mutex_unlock(&dev_priv->drm.struct_mutex); +} + +static void +free_oa_buffer(struct drm_i915_private *i915) +{ + mutex_lock(&i915->drm.struct_mutex); + + i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj); + i915_vma_unpin(i915->perf.oa.oa_buffer.vma); + i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj); + + i915->perf.oa.oa_buffer.vma = NULL; + i915->perf.oa.oa_buffer.vaddr = NULL; + + mutex_unlock(&i915->drm.struct_mutex); +} + +static void i915_oa_stream_destroy(struct i915_perf_stream *stream) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + + BUG_ON(stream != dev_priv->perf.oa.exclusive_stream); + + dev_priv->perf.oa.ops.disable_metric_set(dev_priv); + + free_oa_buffer(dev_priv); + + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_runtime_pm_put(dev_priv); + + if (stream->ctx) + oa_put_render_ctx_id(stream); + + dev_priv->perf.oa.exclusive_stream = NULL; +} + +static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv) +{ + u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma); + + /* Pre-DevBDW: OABUFFER must be set with counters off, + * before OASTATUS1, but after OASTATUS2 + */ + I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */ + I915_WRITE(GEN7_OABUFFER, gtt_offset); + I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */ + + /* On Haswell we have to track which OASTATUS1 flags we've + * already seen since they can't be cleared while periodic + * sampling is enabled. + */ + dev_priv->perf.oa.gen7_latched_oastatus1 = 0; + + /* NB: although the OA buffer will initially be allocated + * zeroed via shmfs (and so this memset is redundant when + * first allocating), we may re-init the OA buffer, either + * when re-enabling a stream or in error/reset paths. + * + * The reason we clear the buffer for each re-init is for the + * sanity check in gen7_append_oa_reports() that looks at the + * report-id field to make sure it's non-zero which relies on + * the assumption that new reports are being written to zeroed + * memory... + */ + memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE); + + /* Maybe make ->pollin per-stream state if we support multiple + * concurrent streams in the future. 
+ */
+ dev_priv->perf.oa.pollin = false;
+}
+
+static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *bo;
+ struct i915_vma *vma;
+ int ret;
+
+ if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
+ return -ENODEV;
+
+ ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+ if (ret)
+ return ret;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
+ BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
+
+ bo = i915_gem_object_create(&dev_priv->drm, OA_BUFFER_SIZE);
+ if (IS_ERR(bo)) {
+ DRM_ERROR("Failed to allocate OA buffer\n");
+ ret = PTR_ERR(bo);
+ goto unlock;
+ }
+
+ ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
+ if (ret)
+ goto err_unref;
+
+ /* PreHSW required 512K alignment, HSW requires 16M */
+ vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+ dev_priv->perf.oa.oa_buffer.vma = vma;
+
+ dev_priv->perf.oa.oa_buffer.vaddr =
+ i915_gem_object_pin_map(bo, I915_MAP_WB);
+ if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
+ ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
+ goto err_unpin;
+ }
+
+ dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
+
+ DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
+ i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
+ dev_priv->perf.oa.oa_buffer.vaddr);
+
+ goto unlock;
+
+err_unpin:
+ __i915_vma_unpin(vma);
+
+err_unref:
+ i915_gem_object_put(bo);
+
+ dev_priv->perf.oa.oa_buffer.vaddr = NULL;
+ dev_priv->perf.oa.oa_buffer.vma = NULL;
+
+unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static void config_oa_regs(struct drm_i915_private *dev_priv,
+ const struct i915_oa_reg *regs,
+ int n_regs)
+{
+ int i;
+
+ for (i = 0; i < n_regs; i++) {
+ const struct i915_oa_reg *reg = regs + i;
+
+ I915_WRITE(reg->addr, reg->value);
+ }
+}
+
+static int hsw_enable_metric_set(struct drm_i915_private *dev_priv)
+{
+ int ret = i915_oa_select_metric_set_hsw(dev_priv);
+
+ if (ret)
+ return ret;
+
+ I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) |
+ GT_NOA_ENABLE));
+
+ /* PRM:
+ *
+ * OA unit is using “crclk” for its functionality. When trunk
+ * level clock gating takes place, OA clock would be gated,
+ * unable to count the events from non-render clock domain.
+ * Render clock gating must be disabled when OA is enabled to
+ * count the events from non-render domain. Unit level clock
+ * gating for RCS should also be disabled.
+ */
+ I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
+ ~GEN7_DOP_CLOCK_GATE_ENABLE));
+ I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE));
+
+ config_oa_regs(dev_priv, dev_priv->perf.oa.mux_regs,
+ dev_priv->perf.oa.mux_regs_len);
+
+ /* It apparently takes a fairly long time for a new MUX
+ * configuration to be applied after these register writes.
+ * This delay duration was derived empirically based on the
+ * render_basic config but hopefully it covers the maximum
+ * configuration latency.
+ *
+ * As a fallback, the checks in _append_oa_reports() to skip
+ * invalid OA reports do also seem to work to discard reports
+ * generated before this config has completed - albeit not
+ * silently.
+ *
+ * Unfortunately this is essentially a magic number, since we
+ * don't currently know of a reliable mechanism for predicting
+ * how long the MUX config will take to apply and besides
+ * seeing invalid reports we don't know of a reliable way to
+ * explicitly check that the MUX config has landed.
+ *
+ * It's even possible we've mischaracterized the underlying
+ * problem - it just seems like the simplest explanation why
+ * a delay at this location would mitigate any invalid reports.
+ */
+ usleep_range(15000, 20000);
+
+ config_oa_regs(dev_priv, dev_priv->perf.oa.b_counter_regs,
+ dev_priv->perf.oa.b_counter_regs_len);
+
+ return 0;
+}
+
+static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
+ ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
+ I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
+ GEN7_DOP_CLOCK_GATE_ENABLE));
+
+ I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
+ ~GT_NOA_ENABLE));
+}
+
+static void gen7_update_oacontrol_locked(struct drm_i915_private *dev_priv)
+{
+ assert_spin_locked(&dev_priv->perf.hook_lock);
+
+ if (dev_priv->perf.oa.exclusive_stream->enabled) {
+ struct i915_gem_context *ctx =
+ dev_priv->perf.oa.exclusive_stream->ctx;
+ u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
+
+ bool periodic = dev_priv->perf.oa.periodic;
+ u32 period_exponent = dev_priv->perf.oa.period_exponent;
+ u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+
+ I915_WRITE(GEN7_OACONTROL,
+ (ctx_id & GEN7_OACONTROL_CTX_MASK) |
+ (period_exponent <<
+ GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
+ (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
+ (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
+ (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
+ GEN7_OACONTROL_ENABLE);
+ } else
+ I915_WRITE(GEN7_OACONTROL, 0);
+}
+
+static void gen7_oa_enable(struct drm_i915_private *dev_priv)
+{
+ unsigned long flags;
+
+ /* Reset buf pointers so we don't forward reports from before now.
+ *
+ * Think carefully if considering trying to avoid this, since it
+ * also ensures status flags and the buffer itself are cleared
+ * in error paths, and we have checks for invalid reports based
+ * on the assumption that certain fields are written to zeroed
+ * memory which this helps maintain.
+ */ + gen7_init_oa_buffer(dev_priv); + + spin_lock_irqsave(&dev_priv->perf.hook_lock, flags); + gen7_update_oacontrol_locked(dev_priv); + spin_unlock_irqrestore(&dev_priv->perf.hook_lock, flags); +} + +static void i915_oa_stream_enable(struct i915_perf_stream *stream) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + + dev_priv->perf.oa.ops.oa_enable(dev_priv); + + if (dev_priv->perf.oa.periodic) + hrtimer_start(&dev_priv->perf.oa.poll_check_timer, + ns_to_ktime(POLL_PERIOD), + HRTIMER_MODE_REL_PINNED); +} + +static void gen7_oa_disable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(GEN7_OACONTROL, 0); +} + +static void i915_oa_stream_disable(struct i915_perf_stream *stream) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + + dev_priv->perf.oa.ops.oa_disable(dev_priv); + + if (dev_priv->perf.oa.periodic) + hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer); +} + +static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent) +{ + return 1000000000ULL * (2ULL << exponent) / + dev_priv->perf.oa.timestamp_frequency; +} + +static const struct i915_perf_stream_ops i915_oa_stream_ops = { + .destroy = i915_oa_stream_destroy, + .enable = i915_oa_stream_enable, + .disable = i915_oa_stream_disable, + .wait_unlocked = i915_oa_wait_unlocked, + .poll_wait = i915_oa_poll_wait, + .read = i915_oa_read, }; +static int i915_oa_stream_init(struct i915_perf_stream *stream, + struct drm_i915_perf_open_param *param, + struct perf_open_properties *props) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + int format_size; + int ret; + + if (!(props->sample_flags & SAMPLE_OA_REPORT)) { + DRM_ERROR("Only OA report sampling supported\n"); + return -EINVAL; + } + + if (!dev_priv->perf.oa.ops.init_oa_buffer) { + DRM_ERROR("OA unit not supported\n"); + return -ENODEV; + } + + /* To avoid the complexity of having to accurately filter + * counter reports and marshal to the appropriate client + * we currently only allow exclusive access + */ + if (dev_priv->perf.oa.exclusive_stream) { + DRM_ERROR("OA unit already in use\n"); + return -EBUSY; + } + + if (!props->metrics_set) { + DRM_ERROR("OA metric set not specified\n"); + return -EINVAL; + } + + if (!props->oa_format) { + DRM_ERROR("OA report format not specified\n"); + return -EINVAL; + } + + stream->sample_size = sizeof(struct drm_i915_perf_record_header); + + format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size; + + stream->sample_flags |= SAMPLE_OA_REPORT; + stream->sample_size += format_size; + + dev_priv->perf.oa.oa_buffer.format_size = format_size; + if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0)) + return -EINVAL; + + dev_priv->perf.oa.oa_buffer.format = + dev_priv->perf.oa.oa_formats[props->oa_format].format; + + dev_priv->perf.oa.metrics_set = props->metrics_set; + + dev_priv->perf.oa.periodic = props->oa_periodic; + if (dev_priv->perf.oa.periodic) { + u64 period_ns = oa_exponent_to_ns(dev_priv, + props->oa_period_exponent); + + dev_priv->perf.oa.period_exponent = props->oa_period_exponent; + + /* See comment for OA_TAIL_MARGIN_NSEC for details + * about this tail_margin... 
+ */ + dev_priv->perf.oa.tail_margin = + ((OA_TAIL_MARGIN_NSEC / period_ns) + 1) * format_size; + } + + if (stream->ctx) { + ret = oa_get_render_ctx_id(stream); + if (ret) + return ret; + } + + ret = alloc_oa_buffer(dev_priv); + if (ret) + goto err_oa_buf_alloc; + + /* PRM - observability performance counters: + * + * OACONTROL, performance counter enable, note: + * + * "When this bit is set, in order to have coherent counts, + * RC6 power state and trunk clock gating must be disabled. + * This can be achieved by programming MMIO registers as + * 0xA094=0 and 0xA090[31]=1" + * + * In our case we are expecting that taking pm + FORCEWAKE + * references will effectively disable RC6. + */ + intel_runtime_pm_get(dev_priv); + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + + ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv); + if (ret) + goto err_enable; + + stream->ops = &i915_oa_stream_ops; + + dev_priv->perf.oa.exclusive_stream = stream; + + return 0; + +err_enable: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_runtime_pm_put(dev_priv); + free_oa_buffer(dev_priv); + +err_oa_buf_alloc: + if (stream->ctx) + oa_put_render_ctx_id(stream); + + return ret; +} + static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream, struct file *file, char __user *buf, @@ -78,8 +961,20 @@ static ssize_t i915_perf_read(struct file *file, struct drm_i915_private *dev_priv = stream->dev_priv; ssize_t ret; + /* To ensure it's handled consistently we simply treat all reads of a + * disabled stream as an error. In particular it might otherwise lead + * to a deadlock for blocking file descriptors... + */ + if (!stream->enabled) + return -EIO; + if (!(file->f_flags & O_NONBLOCK)) { - /* Allow false positives from stream->ops->wait_unlocked. + /* There's the small chance of false positives from + * stream->ops->wait_unlocked. + * + * E.g. with single context filtering since we only wait until + * oabuffer has >= 1 report we don't immediately know whether + * any reports really belong to the current context */ do { ret = stream->ops->wait_unlocked(stream); @@ -97,21 +992,51 @@ static ssize_t i915_perf_read(struct file *file, mutex_unlock(&dev_priv->perf.lock); } + if (ret >= 0) { + /* Maybe make ->pollin per-stream state if we support multiple + * concurrent streams in the future. + */ + dev_priv->perf.oa.pollin = false; + } + return ret; } -static unsigned int i915_perf_poll_locked(struct i915_perf_stream *stream, +static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) +{ + struct drm_i915_private *dev_priv = + container_of(hrtimer, typeof(*dev_priv), + perf.oa.poll_check_timer); + + if (!dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv)) { + dev_priv->perf.oa.pollin = true; + wake_up(&dev_priv->perf.oa.poll_wq); + } + + hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD)); + + return HRTIMER_RESTART; +} + +static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv, + struct i915_perf_stream *stream, struct file *file, poll_table *wait) { - unsigned int streams = 0; + unsigned int events = 0; stream->ops->poll_wait(stream, file, wait); - if (stream->ops->can_read(stream)) - streams |= POLLIN; + /* Note: we don't explicitly check whether there's something to read + * here since this path may be very hot depending on what else + * userspace is polling, or on the timeout in use. We rely solely on + * the hrtimer/oa_poll_check_timer_cb to notify us when there are + * samples to read. 
+ */ + if (dev_priv->perf.oa.pollin) + events |= POLLIN; - return streams; + return events; } static unsigned int i915_perf_poll(struct file *file, poll_table *wait) @@ -121,7 +1046,7 @@ static unsigned int i915_perf_poll(struct file *file, poll_table *wait) int ret; mutex_lock(&dev_priv->perf.lock); - ret = i915_perf_poll_locked(stream, file, wait); + ret = i915_perf_poll_locked(dev_priv, stream, file, wait); mutex_unlock(&dev_priv->perf.lock); return ret; @@ -285,18 +1210,21 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, goto err_ctx; } - stream->sample_flags = props->sample_flags; stream->dev_priv = dev_priv; stream->ctx = specific_ctx; - /* - * TODO: support sampling something - * - * For now this is as far as we can go. + ret = i915_oa_stream_init(stream, param, props); + if (ret) + goto err_alloc; + + /* we avoid simply assigning stream->sample_flags = props->sample_flags + * to have _stream_init check the combination of sample flags more + * thoroughly, but still this is the expected result at this point. */ - DRM_ERROR("Unsupported i915 perf stream configuration\n"); - ret = -EINVAL; - goto err_alloc; + if (WARN_ON(stream->sample_flags != props->sample_flags)) { + ret = -ENODEV; + goto err_alloc; + } list_add(&stream->link, &dev_priv->perf.streams); @@ -382,6 +1310,56 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, props->single_context = 1; props->ctx_handle = value; break; + case DRM_I915_PERF_PROP_SAMPLE_OA: + props->sample_flags |= SAMPLE_OA_REPORT; + break; + case DRM_I915_PERF_PROP_OA_METRICS_SET: + if (value == 0 || + value > dev_priv->perf.oa.n_builtin_sets) { + DRM_ERROR("Unknown OA metric set ID"); + return -EINVAL; + } + props->metrics_set = value; + break; + case DRM_I915_PERF_PROP_OA_FORMAT: + if (value == 0 || value >= I915_OA_FORMAT_MAX) { + DRM_ERROR("Invalid OA report format\n"); + return -EINVAL; + } + if (!dev_priv->perf.oa.oa_formats[value].size) { + DRM_ERROR("Invalid OA report format\n"); + return -EINVAL; + } + props->oa_format = value; + break; + case DRM_I915_PERF_PROP_OA_EXPONENT: + if (value > OA_EXPONENT_MAX) { + DRM_ERROR("OA timer exponent too high (> %u)\n", + OA_EXPONENT_MAX); + return -EINVAL; + } + + /* NB: The exponent represents a period as follows: + * + * 80ns * 2^(period_exponent + 1) + * + * Theoretically we can program the OA unit to sample + * every 160ns but don't allow that by default unless + * root. 
+ * + * Referring to perf's + * kernel.perf_event_max_sample_rate for a precedent + * (100000 by default); with an OA exponent of 6 we get + * a period of 10.240 microseconds -just under 100000Hz + */ + if (value < 6 && !capable(CAP_SYS_ADMIN)) { + DRM_ERROR("Minimum OA sampling exponent is 6 without root privileges\n"); + return -EACCES; + } + + props->oa_periodic = true; + props->oa_period_exponent = value; + break; default: MISSING_CASE(id); DRM_ERROR("Unknown i915 perf property ID"); @@ -432,8 +1410,33 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data, void i915_perf_init(struct drm_i915_private *dev_priv) { + if (!IS_HASWELL(dev_priv)) + return; + + hrtimer_init(&dev_priv->perf.oa.poll_check_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); + dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb; + init_waitqueue_head(&dev_priv->perf.oa.poll_wq); + INIT_LIST_HEAD(&dev_priv->perf.streams); mutex_init(&dev_priv->perf.lock); + spin_lock_init(&dev_priv->perf.hook_lock); + + dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer; + dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set; + dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set; + dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable; + dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable; + dev_priv->perf.oa.ops.read = gen7_oa_read; + dev_priv->perf.oa.ops.oa_buffer_is_empty = + gen7_oa_buffer_is_empty_fop_unlocked; + + dev_priv->perf.oa.timestamp_frequency = 12500000; + + dev_priv->perf.oa.oa_formats = hsw_oa_formats; + + dev_priv->perf.oa.n_builtin_sets = + i915_oa_n_builtin_metric_sets_hsw; dev_priv->perf.initialized = true; } @@ -443,7 +1446,6 @@ void i915_perf_fini(struct drm_i915_private *dev_priv) if (!dev_priv->perf.initialized) return; - /* Currently nothing to clean up */ - + memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops)); dev_priv->perf.initialized = false; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ee8707343ae4..fcad8fa41274 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -616,6 +616,343 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4) #define GEN7_OACONTROL _MMIO(0x2360) +#define GEN7_OACONTROL_CTX_MASK 0xFFFFF000 +#define GEN7_OACONTROL_TIMER_PERIOD_MASK 0x3F +#define GEN7_OACONTROL_TIMER_PERIOD_SHIFT 6 +#define GEN7_OACONTROL_TIMER_ENABLE (1<<5) +#define GEN7_OACONTROL_FORMAT_A13 (0<<2) +#define GEN7_OACONTROL_FORMAT_A29 (1<<2) +#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2<<2) +#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3<<2) +#define GEN7_OACONTROL_FORMAT_B4_C8 (4<<2) +#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5<<2) +#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6<<2) +#define GEN7_OACONTROL_FORMAT_C4_B8 (7<<2) +#define GEN7_OACONTROL_FORMAT_SHIFT 2 +#define GEN7_OACONTROL_PER_CTX_ENABLE (1<<1) +#define GEN7_OACONTROL_ENABLE (1<<0) + +#define GEN8_OACTXID _MMIO(0x2364) + +#define GEN8_OACONTROL _MMIO(0x2B00) +#define GEN8_OA_REPORT_FORMAT_A12 (0<<2) +#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2<<2) +#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5<<2) +#define GEN8_OA_REPORT_FORMAT_C4_B8 (7<<2) +#define GEN8_OA_REPORT_FORMAT_SHIFT 2 +#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1<<1) +#define GEN8_OA_COUNTER_ENABLE (1<<0) + +#define GEN8_OACTXCONTROL _MMIO(0x2360) +#define GEN8_OA_TIMER_PERIOD_MASK 0x3F +#define GEN8_OA_TIMER_PERIOD_SHIFT 2 +#define GEN8_OA_TIMER_ENABLE (1<<1) +#define GEN8_OA_COUNTER_RESUME (1<<0) + +#define 
GEN7_OABUFFER _MMIO(0x23B0) /* R/W */ +#define GEN7_OABUFFER_OVERRUN_DISABLE (1<<3) +#define GEN7_OABUFFER_EDGE_TRIGGER (1<<2) +#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1<<1) +#define GEN7_OABUFFER_RESUME (1<<0) + +#define GEN8_OABUFFER _MMIO(0x2b14) + +#define GEN7_OASTATUS1 _MMIO(0x2364) +#define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0 +#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1<<2) +#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1<<1) +#define GEN7_OASTATUS1_REPORT_LOST (1<<0) + +#define GEN7_OASTATUS2 _MMIO(0x2368) +#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0 + +#define GEN8_OASTATUS _MMIO(0x2b08) +#define GEN8_OASTATUS_OVERRUN_STATUS (1<<3) +#define GEN8_OASTATUS_COUNTER_OVERFLOW (1<<2) +#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1<<1) +#define GEN8_OASTATUS_REPORT_LOST (1<<0) + +#define GEN8_OAHEADPTR _MMIO(0x2B0C) +#define GEN8_OATAILPTR _MMIO(0x2B10) + +#define OABUFFER_SIZE_128K (0<<3) +#define OABUFFER_SIZE_256K (1<<3) +#define OABUFFER_SIZE_512K (2<<3) +#define OABUFFER_SIZE_1M (3<<3) +#define OABUFFER_SIZE_2M (4<<3) +#define OABUFFER_SIZE_4M (5<<3) +#define OABUFFER_SIZE_8M (6<<3) +#define OABUFFER_SIZE_16M (7<<3) + +#define OA_MEM_SELECT_GGTT (1<<0) + +#define EU_PERF_CNTL0 _MMIO(0xe458) + +#define GDT_CHICKEN_BITS _MMIO(0x9840) +#define GT_NOA_ENABLE 0x00000080 + +/* + * OA Boolean state + */ + +#define OAREPORTTRIG1 _MMIO(0x2740) +#define OAREPORTTRIG1_THRESHOLD_MASK 0xffff +#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */ + +#define OAREPORTTRIG2 _MMIO(0x2744) +#define OAREPORTTRIG2_INVERT_A_0 (1<<0) +#define OAREPORTTRIG2_INVERT_A_1 (1<<1) +#define OAREPORTTRIG2_INVERT_A_2 (1<<2) +#define OAREPORTTRIG2_INVERT_A_3 (1<<3) +#define OAREPORTTRIG2_INVERT_A_4 (1<<4) +#define OAREPORTTRIG2_INVERT_A_5 (1<<5) +#define OAREPORTTRIG2_INVERT_A_6 (1<<6) +#define OAREPORTTRIG2_INVERT_A_7 (1<<7) +#define OAREPORTTRIG2_INVERT_A_8 (1<<8) +#define OAREPORTTRIG2_INVERT_A_9 (1<<9) +#define OAREPORTTRIG2_INVERT_A_10 (1<<10) +#define OAREPORTTRIG2_INVERT_A_11 (1<<11) +#define OAREPORTTRIG2_INVERT_A_12 (1<<12) +#define OAREPORTTRIG2_INVERT_A_13 (1<<13) +#define OAREPORTTRIG2_INVERT_A_14 (1<<14) +#define OAREPORTTRIG2_INVERT_A_15 (1<<15) +#define OAREPORTTRIG2_INVERT_B_0 (1<<16) +#define OAREPORTTRIG2_INVERT_B_1 (1<<17) +#define OAREPORTTRIG2_INVERT_B_2 (1<<18) +#define OAREPORTTRIG2_INVERT_B_3 (1<<19) +#define OAREPORTTRIG2_INVERT_C_0 (1<<20) +#define OAREPORTTRIG2_INVERT_C_1 (1<<21) +#define OAREPORTTRIG2_INVERT_D_0 (1<<22) +#define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23) +#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31) + +#define OAREPORTTRIG3 _MMIO(0x2748) +#define OAREPORTTRIG3_NOA_SELECT_MASK 0xf +#define OAREPORTTRIG3_NOA_SELECT_8_SHIFT 0 +#define OAREPORTTRIG3_NOA_SELECT_9_SHIFT 4 +#define OAREPORTTRIG3_NOA_SELECT_10_SHIFT 8 +#define OAREPORTTRIG3_NOA_SELECT_11_SHIFT 12 +#define OAREPORTTRIG3_NOA_SELECT_12_SHIFT 16 +#define OAREPORTTRIG3_NOA_SELECT_13_SHIFT 20 +#define OAREPORTTRIG3_NOA_SELECT_14_SHIFT 24 +#define OAREPORTTRIG3_NOA_SELECT_15_SHIFT 28 + +#define OAREPORTTRIG4 _MMIO(0x274c) +#define OAREPORTTRIG4_NOA_SELECT_MASK 0xf +#define OAREPORTTRIG4_NOA_SELECT_0_SHIFT 0 +#define OAREPORTTRIG4_NOA_SELECT_1_SHIFT 4 +#define OAREPORTTRIG4_NOA_SELECT_2_SHIFT 8 +#define OAREPORTTRIG4_NOA_SELECT_3_SHIFT 12 +#define OAREPORTTRIG4_NOA_SELECT_4_SHIFT 16 +#define OAREPORTTRIG4_NOA_SELECT_5_SHIFT 20 +#define OAREPORTTRIG4_NOA_SELECT_6_SHIFT 24 +#define OAREPORTTRIG4_NOA_SELECT_7_SHIFT 28 + +#define OAREPORTTRIG5 _MMIO(0x2750) +#define OAREPORTTRIG5_THRESHOLD_MASK 0xffff 
+#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */ + +#define OAREPORTTRIG6 _MMIO(0x2754) +#define OAREPORTTRIG6_INVERT_A_0 (1<<0) +#define OAREPORTTRIG6_INVERT_A_1 (1<<1) +#define OAREPORTTRIG6_INVERT_A_2 (1<<2) +#define OAREPORTTRIG6_INVERT_A_3 (1<<3) +#define OAREPORTTRIG6_INVERT_A_4 (1<<4) +#define OAREPORTTRIG6_INVERT_A_5 (1<<5) +#define OAREPORTTRIG6_INVERT_A_6 (1<<6) +#define OAREPORTTRIG6_INVERT_A_7 (1<<7) +#define OAREPORTTRIG6_INVERT_A_8 (1<<8) +#define OAREPORTTRIG6_INVERT_A_9 (1<<9) +#define OAREPORTTRIG6_INVERT_A_10 (1<<10) +#define OAREPORTTRIG6_INVERT_A_11 (1<<11) +#define OAREPORTTRIG6_INVERT_A_12 (1<<12) +#define OAREPORTTRIG6_INVERT_A_13 (1<<13) +#define OAREPORTTRIG6_INVERT_A_14 (1<<14) +#define OAREPORTTRIG6_INVERT_A_15 (1<<15) +#define OAREPORTTRIG6_INVERT_B_0 (1<<16) +#define OAREPORTTRIG6_INVERT_B_1 (1<<17) +#define OAREPORTTRIG6_INVERT_B_2 (1<<18) +#define OAREPORTTRIG6_INVERT_B_3 (1<<19) +#define OAREPORTTRIG6_INVERT_C_0 (1<<20) +#define OAREPORTTRIG6_INVERT_C_1 (1<<21) +#define OAREPORTTRIG6_INVERT_D_0 (1<<22) +#define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23) +#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31) + +#define OAREPORTTRIG7 _MMIO(0x2758) +#define OAREPORTTRIG7_NOA_SELECT_MASK 0xf +#define OAREPORTTRIG7_NOA_SELECT_8_SHIFT 0 +#define OAREPORTTRIG7_NOA_SELECT_9_SHIFT 4 +#define OAREPORTTRIG7_NOA_SELECT_10_SHIFT 8 +#define OAREPORTTRIG7_NOA_SELECT_11_SHIFT 12 +#define OAREPORTTRIG7_NOA_SELECT_12_SHIFT 16 +#define OAREPORTTRIG7_NOA_SELECT_13_SHIFT 20 +#define OAREPORTTRIG7_NOA_SELECT_14_SHIFT 24 +#define OAREPORTTRIG7_NOA_SELECT_15_SHIFT 28 + +#define OAREPORTTRIG8 _MMIO(0x275c) +#define OAREPORTTRIG8_NOA_SELECT_MASK 0xf +#define OAREPORTTRIG8_NOA_SELECT_0_SHIFT 0 +#define OAREPORTTRIG8_NOA_SELECT_1_SHIFT 4 +#define OAREPORTTRIG8_NOA_SELECT_2_SHIFT 8 +#define OAREPORTTRIG8_NOA_SELECT_3_SHIFT 12 +#define OAREPORTTRIG8_NOA_SELECT_4_SHIFT 16 +#define OAREPORTTRIG8_NOA_SELECT_5_SHIFT 20 +#define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24 +#define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28 + +#define OASTARTTRIG1 _MMIO(0x2710) +#define OASTARTTRIG1_THRESHOLD_COUNT_MASK_MBZ 0xffff0000 +#define OASTARTTRIG1_THRESHOLD_MASK 0xffff + +#define OASTARTTRIG2 _MMIO(0x2714) +#define OASTARTTRIG2_INVERT_A_0 (1<<0) +#define OASTARTTRIG2_INVERT_A_1 (1<<1) +#define OASTARTTRIG2_INVERT_A_2 (1<<2) +#define OASTARTTRIG2_INVERT_A_3 (1<<3) +#define OASTARTTRIG2_INVERT_A_4 (1<<4) +#define OASTARTTRIG2_INVERT_A_5 (1<<5) +#define OASTARTTRIG2_INVERT_A_6 (1<<6) +#define OASTARTTRIG2_INVERT_A_7 (1<<7) +#define OASTARTTRIG2_INVERT_A_8 (1<<8) +#define OASTARTTRIG2_INVERT_A_9 (1<<9) +#define OASTARTTRIG2_INVERT_A_10 (1<<10) +#define OASTARTTRIG2_INVERT_A_11 (1<<11) +#define OASTARTTRIG2_INVERT_A_12 (1<<12) +#define OASTARTTRIG2_INVERT_A_13 (1<<13) +#define OASTARTTRIG2_INVERT_A_14 (1<<14) +#define OASTARTTRIG2_INVERT_A_15 (1<<15) +#define OASTARTTRIG2_INVERT_B_0 (1<<16) +#define OASTARTTRIG2_INVERT_B_1 (1<<17) +#define OASTARTTRIG2_INVERT_B_2 (1<<18) +#define OASTARTTRIG2_INVERT_B_3 (1<<19) +#define OASTARTTRIG2_INVERT_C_0 (1<<20) +#define OASTARTTRIG2_INVERT_C_1 (1<<21) +#define OASTARTTRIG2_INVERT_D_0 (1<<22) +#define OASTARTTRIG2_THRESHOLD_ENABLE (1<<23) +#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1<<24) +#define OASTARTTRIG2_EVENT_SELECT_0 (1<<28) +#define OASTARTTRIG2_EVENT_SELECT_1 (1<<29) +#define OASTARTTRIG2_EVENT_SELECT_2 (1<<30) +#define OASTARTTRIG2_EVENT_SELECT_3 (1<<31) + +#define OASTARTTRIG3 _MMIO(0x2718) +#define OASTARTTRIG3_NOA_SELECT_MASK 0xf +#define 
OASTARTTRIG3_NOA_SELECT_8_SHIFT 0 +#define OASTARTTRIG3_NOA_SELECT_9_SHIFT 4 +#define OASTARTTRIG3_NOA_SELECT_10_SHIFT 8 +#define OASTARTTRIG3_NOA_SELECT_11_SHIFT 12 +#define OASTARTTRIG3_NOA_SELECT_12_SHIFT 16 +#define OASTARTTRIG3_NOA_SELECT_13_SHIFT 20 +#define OASTARTTRIG3_NOA_SELECT_14_SHIFT 24 +#define OASTARTTRIG3_NOA_SELECT_15_SHIFT 28 + +#define OASTARTTRIG4 _MMIO(0x271c) +#define OASTARTTRIG4_NOA_SELECT_MASK 0xf +#define OASTARTTRIG4_NOA_SELECT_0_SHIFT 0 +#define OASTARTTRIG4_NOA_SELECT_1_SHIFT 4 +#define OASTARTTRIG4_NOA_SELECT_2_SHIFT 8 +#define OASTARTTRIG4_NOA_SELECT_3_SHIFT 12 +#define OASTARTTRIG4_NOA_SELECT_4_SHIFT 16 +#define OASTARTTRIG4_NOA_SELECT_5_SHIFT 20 +#define OASTARTTRIG4_NOA_SELECT_6_SHIFT 24 +#define OASTARTTRIG4_NOA_SELECT_7_SHIFT 28 + +#define OASTARTTRIG5 _MMIO(0x2720) +#define OASTARTTRIG5_THRESHOLD_COUNT_MASK_MBZ 0xffff0000 +#define OASTARTTRIG5_THRESHOLD_MASK 0xffff + +#define OASTARTTRIG6 _MMIO(0x2724) +#define OASTARTTRIG6_INVERT_A_0 (1<<0) +#define OASTARTTRIG6_INVERT_A_1 (1<<1) +#define OASTARTTRIG6_INVERT_A_2 (1<<2) +#define OASTARTTRIG6_INVERT_A_3 (1<<3) +#define OASTARTTRIG6_INVERT_A_4 (1<<4) +#define OASTARTTRIG6_INVERT_A_5 (1<<5) +#define OASTARTTRIG6_INVERT_A_6 (1<<6) +#define OASTARTTRIG6_INVERT_A_7 (1<<7) +#define OASTARTTRIG6_INVERT_A_8 (1<<8) +#define OASTARTTRIG6_INVERT_A_9 (1<<9) +#define OASTARTTRIG6_INVERT_A_10 (1<<10) +#define OASTARTTRIG6_INVERT_A_11 (1<<11) +#define OASTARTTRIG6_INVERT_A_12 (1<<12) +#define OASTARTTRIG6_INVERT_A_13 (1<<13) +#define OASTARTTRIG6_INVERT_A_14 (1<<14) +#define OASTARTTRIG6_INVERT_A_15 (1<<15) +#define OASTARTTRIG6_INVERT_B_0 (1<<16) +#define OASTARTTRIG6_INVERT_B_1 (1<<17) +#define OASTARTTRIG6_INVERT_B_2 (1<<18) +#define OASTARTTRIG6_INVERT_B_3 (1<<19) +#define OASTARTTRIG6_INVERT_C_0 (1<<20) +#define OASTARTTRIG6_INVERT_C_1 (1<<21) +#define OASTARTTRIG6_INVERT_D_0 (1<<22) +#define OASTARTTRIG6_THRESHOLD_ENABLE (1<<23) +#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1<<24) +#define OASTARTTRIG6_EVENT_SELECT_4 (1<<28) +#define OASTARTTRIG6_EVENT_SELECT_5 (1<<29) +#define OASTARTTRIG6_EVENT_SELECT_6 (1<<30) +#define OASTARTTRIG6_EVENT_SELECT_7 (1<<31) + +#define OASTARTTRIG7 _MMIO(0x2728) +#define OASTARTTRIG7_NOA_SELECT_MASK 0xf +#define OASTARTTRIG7_NOA_SELECT_8_SHIFT 0 +#define OASTARTTRIG7_NOA_SELECT_9_SHIFT 4 +#define OASTARTTRIG7_NOA_SELECT_10_SHIFT 8 +#define OASTARTTRIG7_NOA_SELECT_11_SHIFT 12 +#define OASTARTTRIG7_NOA_SELECT_12_SHIFT 16 +#define OASTARTTRIG7_NOA_SELECT_13_SHIFT 20 +#define OASTARTTRIG7_NOA_SELECT_14_SHIFT 24 +#define OASTARTTRIG7_NOA_SELECT_15_SHIFT 28 + +#define OASTARTTRIG8 _MMIO(0x272c) +#define OASTARTTRIG8_NOA_SELECT_MASK 0xf +#define OASTARTTRIG8_NOA_SELECT_0_SHIFT 0 +#define OASTARTTRIG8_NOA_SELECT_1_SHIFT 4 +#define OASTARTTRIG8_NOA_SELECT_2_SHIFT 8 +#define OASTARTTRIG8_NOA_SELECT_3_SHIFT 12 +#define OASTARTTRIG8_NOA_SELECT_4_SHIFT 16 +#define OASTARTTRIG8_NOA_SELECT_5_SHIFT 20 +#define OASTARTTRIG8_NOA_SELECT_6_SHIFT 24 +#define OASTARTTRIG8_NOA_SELECT_7_SHIFT 28 + +/* CECX_0 */ +#define OACEC_COMPARE_LESS_OR_EQUAL 6 +#define OACEC_COMPARE_NOT_EQUAL 5 +#define OACEC_COMPARE_LESS_THAN 4 +#define OACEC_COMPARE_GREATER_OR_EQUAL 3 +#define OACEC_COMPARE_EQUAL 2 +#define OACEC_COMPARE_GREATER_THAN 1 +#define OACEC_COMPARE_ANY_EQUAL 0 + +#define OACEC_COMPARE_VALUE_MASK 0xffff +#define OACEC_COMPARE_VALUE_SHIFT 3 + +#define OACEC_SELECT_NOA (0<<19) +#define OACEC_SELECT_PREV (1<<19) +#define OACEC_SELECT_BOOLEAN (2<<19) + +/* CECX_1 */ +#define OACEC_MASK_MASK 0xffff +#define 
OACEC_CONSIDERATIONS_MASK 0xffff +#define OACEC_CONSIDERATIONS_SHIFT 16 + +#define OACEC0_0 _MMIO(0x2770) +#define OACEC0_1 _MMIO(0x2774) +#define OACEC1_0 _MMIO(0x2778) +#define OACEC1_1 _MMIO(0x277c) +#define OACEC2_0 _MMIO(0x2780) +#define OACEC2_1 _MMIO(0x2784) +#define OACEC3_0 _MMIO(0x2788) +#define OACEC3_1 _MMIO(0x278c) +#define OACEC4_0 _MMIO(0x2790) +#define OACEC4_1 _MMIO(0x2794) +#define OACEC5_0 _MMIO(0x2798) +#define OACEC5_1 _MMIO(0x279c) +#define OACEC6_0 _MMIO(0x27a0) +#define OACEC6_1 _MMIO(0x27a4) +#define OACEC7_0 _MMIO(0x27a8) +#define OACEC7_1 _MMIO(0x27ac) + #define _GEN7_PIPEA_DE_LOAD_SL 0x70068 #define _GEN7_PIPEB_DE_LOAD_SL 0x71068 @@ -6914,6 +7251,7 @@ enum { # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) #define GEN6_UCGCTL3 _MMIO(0x9408) +# define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20) #define GEN7_UCGCTL4 _MMIO(0x940c) #define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 192dc1f8256b..bdfc68876990 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -1230,6 +1230,18 @@ struct drm_i915_gem_context_param { __u64 value; }; +enum drm_i915_oa_format { + I915_OA_FORMAT_A13 = 1, + I915_OA_FORMAT_A29, + I915_OA_FORMAT_A13_B8_C8, + I915_OA_FORMAT_B4_C8, + I915_OA_FORMAT_A45_B8_C8, + I915_OA_FORMAT_B4_C8_A16, + I915_OA_FORMAT_C4_B8, + + I915_OA_FORMAT_MAX /* non-ABI */ +}; + enum drm_i915_perf_property_id { /** * Open the stream for a specific context handle (as used with @@ -1238,6 +1250,32 @@ enum drm_i915_perf_property_id { */ DRM_I915_PERF_PROP_CTX_HANDLE = 1, + /** + * A value of 1 requests the inclusion of raw OA unit reports as + * part of stream samples. + */ + DRM_I915_PERF_PROP_SAMPLE_OA, + + /** + * The value specifies which set of OA unit metrics should be + * be configured, defining the contents of any OA unit reports. + */ + DRM_I915_PERF_PROP_OA_METRICS_SET, + + /** + * The value specifies the size and layout of OA unit reports. + */ + DRM_I915_PERF_PROP_OA_FORMAT, + + /** + * Specifying this property implicitly requests periodic OA unit + * sampling and (at least on Haswell) the sampling frequency is derived + * from this exponent as follows: + * + * 80ns * 2^(period_exponent + 1) + */ + DRM_I915_PERF_PROP_OA_EXPONENT, + DRM_I915_PERF_PROP_MAX /* non-ABI */ }; @@ -1257,7 +1295,23 @@ struct drm_i915_perf_open_param { __u64 __user properties_ptr; }; +/** + * Enable data capture for a stream that was either opened in a disabled state + * via I915_PERF_FLAG_DISABLED or was later disabled via + * I915_PERF_IOCTL_DISABLE. + * + * It is intended to be cheaper to disable and enable a stream than it may be + * to close and re-open a stream with the same configuration. + * + * It's undefined whether any pending data for the stream will be lost. + */ #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0) + +/** + * Disable data capture for a stream. + * + * It is an error to try and read a stream that is disabled. + */ #define I915_PERF_IOCTL_DISABLE _IO('i', 0x1) /** @@ -1281,17 +1335,30 @@ enum drm_i915_perf_record_type { * every sample. * * The order of these sample properties given by userspace has no - * affect on the ordering of data within a sample. The order will be + * affect on the ordering of data within a sample. The order is * documented here. 
* * struct { * struct drm_i915_perf_record_header header; * - * TODO: itemize extensible sample data here + * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA * }; */ DRM_I915_PERF_RECORD_SAMPLE = 1, + /* + * Indicates that one or more OA reports were not written by the + * hardware. This can happen for example if an MI_REPORT_PERF_COUNT + * command collides with periodic sampling - which would be more likely + * at higher sampling frequencies. + */ + DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2, + + /** + * An error occurred that resulted in all pending OA reports being lost. + */ + DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3, + DRM_I915_PERF_RECORD_MAX /* non-ABI */ }; -- cgit v1.2.3 From 442b8c06fc7230772a663a00feeb5ebc61652d6d Mon Sep 17 00:00:00 2001 From: Robert Bragg Date: Mon, 7 Nov 2016 19:49:53 +0000 Subject: drm/i915: advertise available metrics via sysfs Each metric set is given a sysfs entry like: /sys/class/drm/card0/metrics//id This allows userspace to enumerate the specific sets that are available for the current system. The 'id' file contains an unsigned integer that can be used to open the associated metric set via DRM_IOCTL_I915_PERF_OPEN. The is a globally unique ID for a specific OA unit register configuration that can be reliably used by userspace as a key to lookup corresponding counter meta data and normalization equations. The guid registry is currently maintained as part of gputop along with the XML metric set descriptions and code generation scripts, ref: https://github.com/rib/gputop > gputop-data/guids.xml > scripts/update-guids.py > gputop-data/oa-*.xml > scripts/i915-perf-kernelgen.py $ make -C gputop-data -f Makefile.xml SYSFS=1 WHITELIST=RenderBasic Signed-off-by: Robert Bragg Reviewed-by: Matthew Auld Reviewed-by: Sourab Gupta Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161107194957.3385-8-robert@sixbynine.org --- drivers/gpu/drm/i915/i915_drv.c | 5 ++++ drivers/gpu/drm/i915/i915_drv.h | 4 +++ drivers/gpu/drm/i915/i915_oa_hsw.c | 51 +++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_oa_hsw.h | 4 +++ drivers/gpu/drm/i915/i915_perf.c | 52 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 116 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 94a9fb0f7b39..ff13503eae08 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1129,6 +1129,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) i915_debugfs_register(dev_priv); i915_guc_register(dev_priv); i915_setup_sysfs(dev_priv); + + /* Depends on sysfs having been initialized */ + i915_perf_register(dev_priv); } else DRM_ERROR("Failed to register driver for userspace access!\n"); @@ -1165,6 +1168,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) acpi_video_unregister(); intel_opregion_unregister(dev_priv); + i915_perf_unregister(dev_priv); + i915_teardown_sysfs(dev_priv); i915_guc_unregister(dev_priv); i915_debugfs_unregister(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3a69e219c7a2..8da752304a91 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2193,6 +2193,8 @@ struct drm_i915_private { struct { bool initialized; + struct kobject *metrics_kobj; + struct mutex lock; struct list_head streams; @@ -3533,6 +3535,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, /* i915_perf.c */ extern void i915_perf_init(struct 
drm_i915_private *dev_priv); extern void i915_perf_fini(struct drm_i915_private *dev_priv); +extern void i915_perf_register(struct drm_i915_private *dev_priv); +extern void i915_perf_unregister(struct drm_i915_private *dev_priv); /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c index 8906380d9177..6af25cfefc22 100644 --- a/drivers/gpu/drm/i915/i915_oa_hsw.c +++ b/drivers/gpu/drm/i915/i915_oa_hsw.c @@ -24,6 +24,8 @@ * */ +#include + #include "i915_drv.h" #include "i915_oa_hsw.h" @@ -142,3 +144,52 @@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv) return -ENODEV; } } + +static ssize_t +show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC); +} + +static struct device_attribute dev_attr_render_basic_id = { + .attr = { .name = "id", .mode = 0444 }, + .show = show_render_basic_id, + .store = NULL, +}; + +static struct attribute *attrs_render_basic[] = { + &dev_attr_render_basic_id.attr, + NULL, +}; + +static struct attribute_group group_render_basic = { + .name = "403d8832-1a27-4aa6-a64e-f5389ce7b212", + .attrs = attrs_render_basic, +}; + +int +i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv) +{ + int mux_len; + int ret = 0; + + if (get_render_basic_mux_config(dev_priv, &mux_len)) { + ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic); + if (ret) + goto error_render_basic; + } + + return 0; + +error_render_basic: + return ret; +} + +void +i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv) +{ + int mux_len; + + if (get_render_basic_mux_config(dev_priv, &mux_len)) + sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic); +} diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h index b618a1f0aa94..429a229b5158 100644 --- a/drivers/gpu/drm/i915/i915_oa_hsw.h +++ b/drivers/gpu/drm/i915/i915_oa_hsw.h @@ -31,4 +31,8 @@ extern int i915_oa_n_builtin_metric_sets_hsw; extern int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv); +extern int i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv); + +extern void i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv); + #endif diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 54653bcf0d79..c427cd8cbe1b 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -812,6 +812,15 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, int format_size; int ret; + /* If the sysfs metrics/ directory wasn't registered for some + * reason then don't let userspace try their luck with config + * IDs + */ + if (!dev_priv->perf.metrics_kobj) { + DRM_ERROR("OA metrics weren't advertised via sysfs\n"); + return -EINVAL; + } + if (!(props->sample_flags & SAMPLE_OA_REPORT)) { DRM_ERROR("Only OA report sampling supported\n"); return -EINVAL; @@ -1408,6 +1417,49 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data, return ret; } +void i915_perf_register(struct drm_i915_private *dev_priv) +{ + if (!IS_HASWELL(dev_priv)) + return; + + if (!dev_priv->perf.initialized) + return; + + /* To be sure we're synchronized with an attempted + * i915_perf_open_ioctl(); considering that we register after + * being exposed to userspace. 
+ */ + mutex_lock(&dev_priv->perf.lock); + + dev_priv->perf.metrics_kobj = + kobject_create_and_add("metrics", + &dev_priv->drm.primary->kdev->kobj); + if (!dev_priv->perf.metrics_kobj) + goto exit; + + if (i915_perf_register_sysfs_hsw(dev_priv)) { + kobject_put(dev_priv->perf.metrics_kobj); + dev_priv->perf.metrics_kobj = NULL; + } + +exit: + mutex_unlock(&dev_priv->perf.lock); +} + +void i915_perf_unregister(struct drm_i915_private *dev_priv) +{ + if (!IS_HASWELL(dev_priv)) + return; + + if (!dev_priv->perf.metrics_kobj) + return; + + i915_perf_unregister_sysfs_hsw(dev_priv); + + kobject_put(dev_priv->perf.metrics_kobj); + dev_priv->perf.metrics_kobj = NULL; +} + void i915_perf_init(struct drm_i915_private *dev_priv) { if (!IS_HASWELL(dev_priv)) -- cgit v1.2.3 From ccdf6341ed36e403b2c12d3ef6cb50e9f8e6bdcc Mon Sep 17 00:00:00 2001 From: Robert Bragg Date: Mon, 7 Nov 2016 19:49:54 +0000 Subject: drm/i915: Add dev.i915.perf_stream_paranoid sysctl option Consistent with the kernel.perf_event_paranoid sysctl option that can allow non-root users to access system wide cpu metrics, this can optionally allow non-root users to access system wide OA counter metrics from Gen graphics hardware. Signed-off-by: Robert Bragg Reviewed-by: Matthew Auld Reviewed-by: Sourab Gupta Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161107194957.3385-9-robert@sixbynine.org --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_perf.c | 50 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 50 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8da752304a91..970e50bf9884 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2194,6 +2194,7 @@ struct drm_i915_private { bool initialized; struct kobject *metrics_kobj; + struct ctl_table_header *sysctl_header; struct mutex lock; struct list_head streams; diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index c427cd8cbe1b..e51c1d82255f 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -64,6 +64,11 @@ #define POLL_FREQUENCY 200 #define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY) +/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */ +static int zero; +static int one = 1; +static u32 i915_perf_stream_paranoid = true; + /* The maximum exponent the hardware accepts is 63 (essentially it selects one * of the 64bit timestamp bits to trigger reports from) but there's currently * no known use case for sampling as infrequently as once per 47 thousand years. @@ -1207,7 +1212,13 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, } } - if (!specific_ctx && !capable(CAP_SYS_ADMIN)) { + /* Similar to perf's kernel.perf_paranoid_cpu sysctl option + * we check a dev.i915.perf_stream_paranoid sysctl option + * to determine if it's ok to access system wide OA counters + * without CAP_SYS_ADMIN privileges. 
+ */ + if (!specific_ctx && + i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { DRM_ERROR("Insufficient privileges to open system-wide i915 perf stream\n"); ret = -EACCES; goto err_ctx; @@ -1460,6 +1471,39 @@ void i915_perf_unregister(struct drm_i915_private *dev_priv) dev_priv->perf.metrics_kobj = NULL; } +static struct ctl_table oa_table[] = { + { + .procname = "perf_stream_paranoid", + .data = &i915_perf_stream_paranoid, + .maxlen = sizeof(i915_perf_stream_paranoid), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, + {} +}; + +static struct ctl_table i915_root[] = { + { + .procname = "i915", + .maxlen = 0, + .mode = 0555, + .child = oa_table, + }, + {} +}; + +static struct ctl_table dev_root[] = { + { + .procname = "dev", + .maxlen = 0, + .mode = 0555, + .child = i915_root, + }, + {} +}; + void i915_perf_init(struct drm_i915_private *dev_priv) { if (!IS_HASWELL(dev_priv)) @@ -1490,6 +1534,8 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.oa.n_builtin_sets = i915_oa_n_builtin_metric_sets_hsw; + dev_priv->perf.sysctl_header = register_sysctl_table(dev_root); + dev_priv->perf.initialized = true; } @@ -1498,6 +1544,8 @@ void i915_perf_fini(struct drm_i915_private *dev_priv) if (!dev_priv->perf.initialized) return; + unregister_sysctl_table(dev_priv->perf.sysctl_header); + memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops)); dev_priv->perf.initialized = false; } -- cgit v1.2.3 From 00319ba0434a99de12cb7241c136b5ef4aeaada7 Mon Sep 17 00:00:00 2001 From: Robert Bragg Date: Mon, 7 Nov 2016 19:49:55 +0000 Subject: drm/i915: add dev.i915.oa_max_sample_rate sysctl The maximum OA sampling frequency is now configurable via a dev.i915.oa_max_sample_rate sysctl parameter. Following the precedent set by perf's similar kernel.perf_event_max_sample_rate the default maximum rate is 100000Hz Signed-off-by: Robert Bragg Reviewed-by: Matthew Auld Reviewed-by: Sourab Gupta Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161107194957.3385-10-robert@sixbynine.org --- drivers/gpu/drm/i915/i915_perf.c | 61 ++++++++++++++++++++++++++++++++-------- 1 file changed, 50 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index e51c1d82255f..1a87fe967439 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -82,6 +82,21 @@ static u32 i915_perf_stream_paranoid = true; #define INVALID_CTX_ID 0xffffffff +/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate + * + * 160ns is the smallest sampling period we can theoretically program the OA + * unit with on Haswell, corresponding to 6.25MHz. + */ +static int oa_sample_rate_hard_limit = 6250000; + +/* Theoretically we can program the OA unit to sample every 160ns but don't + * allow that by default unless root... + * + * The default threshold of 100000Hz is based on perf's similar + * kernel.perf_event_max_sample_rate sysctl parameter. + */ +static u32 i915_oa_max_sample_rate = 100000; + /* XXX: beware if future OA HW adds new report formats that the current * code assumes all reports have a power-of-two size and ~(size - 1) can * be used as a mask to align the OA tail pointer. 
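For reference, a small standalone sketch (not kernel code, and not part of these patches) of the period/frequency arithmetic these two constants imply: on Haswell the OA timer period is 80ns * 2^(exponent + 1), so exponent 0 corresponds to the 160ns / 6.25MHz hard limit above, while exponent 6 gives 10240ns (~97656Hz), just under the default 100000Hz dev.i915.oa_max_sample_rate.

#include <stdio.h>

/* Illustrative only: compute the Haswell OA sampling period and frequency
 * for a given timer exponent, mirroring period = 80ns * 2^(exponent + 1).
 */
int main(void)
{
	unsigned int exponent;

	for (exponent = 0; exponent <= 6; exponent++) {
		unsigned long long period_ns = 80ull * (2ull << exponent);
		unsigned long long freq_hz = 1000000000ull / period_ns;

		/* exponent 0 -> 160ns (~6250000Hz hard limit),
		 * exponent 5 -> 5120ns (~195312Hz, over the default
		 * sysctl limit so it needs CAP_SYS_ADMIN),
		 * exponent 6 -> 10240ns (~97656Hz, allowed by default)
		 */
		printf("exponent %u: %lluns period, ~%lluHz\n",
		       exponent, period_ns, freq_hz);
	}

	return 0;
}

The exponent validation added in the following hunk applies this same conversion (via do_div) before comparing the resulting frequency against the sysctl limit.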
@@ -1314,6 +1329,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, } for (i = 0; i < n_props; i++) { + u64 oa_period, oa_freq_hz; u64 id, value; int ret; @@ -1359,21 +1375,35 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, return -EINVAL; } - /* NB: The exponent represents a period as follows: - * - * 80ns * 2^(period_exponent + 1) - * - * Theoretically we can program the OA unit to sample + /* Theoretically we can program the OA unit to sample * every 160ns but don't allow that by default unless * root. * - * Referring to perf's - * kernel.perf_event_max_sample_rate for a precedent - * (100000 by default); with an OA exponent of 6 we get - * a period of 10.240 microseconds -just under 100000Hz + * On Haswell the period is derived from the exponent + * as: + * + * period = 80ns * 2^(exponent + 1) + */ + BUILD_BUG_ON(sizeof(oa_period) != 8); + oa_period = 80ull * (2ull << value); + + /* This check is primarily to ensure that oa_period <= + * UINT32_MAX (before passing to do_div which only + * accepts a u32 denominator), but we can also skip + * checking anything < 1Hz which implicitly can't be + * limited via an integer oa_max_sample_rate. */ - if (value < 6 && !capable(CAP_SYS_ADMIN)) { - DRM_ERROR("Minimum OA sampling exponent is 6 without root privileges\n"); + if (oa_period <= NSEC_PER_SEC) { + u64 tmp = NSEC_PER_SEC; + do_div(tmp, oa_period); + oa_freq_hz = tmp; + } else + oa_freq_hz = 0; + + if (oa_freq_hz > i915_oa_max_sample_rate && + !capable(CAP_SYS_ADMIN)) { + DRM_ERROR("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n", + i915_oa_max_sample_rate); return -EACCES; } @@ -1481,6 +1511,15 @@ static struct ctl_table oa_table[] = { .extra1 = &zero, .extra2 = &one, }, + { + .procname = "oa_max_sample_rate", + .data = &i915_oa_max_sample_rate, + .maxlen = sizeof(i915_oa_max_sample_rate), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &oa_sample_rate_hard_limit, + }, {} }; -- cgit v1.2.3 From c8a9483fb829ca38e9791372d89a0f1a6bb8baed Mon Sep 17 00:00:00 2001 From: Robert Bragg Date: Mon, 7 Nov 2016 19:49:56 +0000 Subject: drm/i915: Add more Haswell OA metric sets This adds 'compute', 'compute extended', 'memory reads', 'memory writes' and 'sampler balance' metric sets for Haswell. 
The code is auto generated from an XML description of metric sets, currently maintained in gputop, ref: https://github.com/rib/gputop > gputop-data/oa-*.xml > scripts/i915-perf-kernelgen.py $ make -C gputop-data -f Makefile.xml Signed-off-by: Robert Bragg Reviewed-by: Matthew Auld Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161107194957.3385-11-robert@sixbynine.org --- drivers/gpu/drm/i915/i915_oa_hsw.c | 559 ++++++++++++++++++++++++++++++++++++- 1 file changed, 558 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c index 6af25cfefc22..4ddf756add31 100644 --- a/drivers/gpu/drm/i915/i915_oa_hsw.c +++ b/drivers/gpu/drm/i915/i915_oa_hsw.c @@ -31,9 +31,14 @@ enum metric_set_id { METRIC_SET_ID_RENDER_BASIC = 1, + METRIC_SET_ID_COMPUTE_BASIC, + METRIC_SET_ID_COMPUTE_EXTENDED, + METRIC_SET_ID_MEMORY_READS, + METRIC_SET_ID_MEMORY_WRITES, + METRIC_SET_ID_SAMPLER_BALANCE, }; -int i915_oa_n_builtin_metric_sets_hsw = 1; +int i915_oa_n_builtin_metric_sets_hsw = 6; static const struct i915_oa_reg b_counter_config_render_basic[] = { { _MMIO(0x2724), 0x00800000 }, @@ -112,6 +117,298 @@ get_render_basic_mux_config(struct drm_i915_private *dev_priv, return mux_config_render_basic; } +static const struct i915_oa_reg b_counter_config_compute_basic[] = { + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2714), 0x00800000 }, + { _MMIO(0x2718), 0xaaaaaaaa }, + { _MMIO(0x271c), 0xaaaaaaaa }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2724), 0x00800000 }, + { _MMIO(0x2728), 0xaaaaaaaa }, + { _MMIO(0x272c), 0xaaaaaaaa }, + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2744), 0x00000000 }, + { _MMIO(0x2748), 0x00000000 }, + { _MMIO(0x274c), 0x00000000 }, + { _MMIO(0x2750), 0x00000000 }, + { _MMIO(0x2754), 0x00000000 }, + { _MMIO(0x2758), 0x00000000 }, + { _MMIO(0x275c), 0x00000000 }, + { _MMIO(0x236c), 0x00000000 }, +}; + +static const struct i915_oa_reg mux_config_compute_basic[] = { + { _MMIO(0x253a4), 0x00000000 }, + { _MMIO(0x2681c), 0x01f00800 }, + { _MMIO(0x26820), 0x00001000 }, + { _MMIO(0x2781c), 0x01f00800 }, + { _MMIO(0x26520), 0x00000007 }, + { _MMIO(0x265a0), 0x00000007 }, + { _MMIO(0x25380), 0x00000010 }, + { _MMIO(0x2538c), 0x00300000 }, + { _MMIO(0x25384), 0xaa8aaaaa }, + { _MMIO(0x25404), 0xffffffff }, + { _MMIO(0x26800), 0x00004202 }, + { _MMIO(0x26808), 0x00605817 }, + { _MMIO(0x2680c), 0x10001005 }, + { _MMIO(0x26804), 0x00000000 }, + { _MMIO(0x27800), 0x00000102 }, + { _MMIO(0x27808), 0x0c0701e0 }, + { _MMIO(0x2780c), 0x000200a0 }, + { _MMIO(0x27804), 0x00000000 }, + { _MMIO(0x26484), 0x44000000 }, + { _MMIO(0x26704), 0x44000000 }, + { _MMIO(0x26500), 0x00000006 }, + { _MMIO(0x26510), 0x00000001 }, + { _MMIO(0x26504), 0x88000000 }, + { _MMIO(0x26580), 0x00000006 }, + { _MMIO(0x26590), 0x00000020 }, + { _MMIO(0x26584), 0x00000000 }, + { _MMIO(0x26104), 0x55822222 }, + { _MMIO(0x26184), 0xaa866666 }, + { _MMIO(0x25420), 0x08320c83 }, + { _MMIO(0x25424), 0x06820c83 }, + { _MMIO(0x2541c), 0x00000000 }, + { _MMIO(0x25428), 0x00000c03 }, +}; + +static const struct i915_oa_reg * +get_compute_basic_mux_config(struct drm_i915_private *dev_priv, + int *len) +{ + *len = ARRAY_SIZE(mux_config_compute_basic); + return mux_config_compute_basic; +} + +static const struct i915_oa_reg b_counter_config_compute_extended[] = { + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2770), 0x0007fe2a }, + { 
_MMIO(0x2774), 0x0000ff00 }, + { _MMIO(0x2778), 0x0007fe6a }, + { _MMIO(0x277c), 0x0000ff00 }, + { _MMIO(0x2780), 0x0007fe92 }, + { _MMIO(0x2784), 0x0000ff00 }, + { _MMIO(0x2788), 0x0007fea2 }, + { _MMIO(0x278c), 0x0000ff00 }, + { _MMIO(0x2790), 0x0007fe32 }, + { _MMIO(0x2794), 0x0000ff00 }, + { _MMIO(0x2798), 0x0007fe9a }, + { _MMIO(0x279c), 0x0000ff00 }, + { _MMIO(0x27a0), 0x0007ff23 }, + { _MMIO(0x27a4), 0x0000ff00 }, + { _MMIO(0x27a8), 0x0007fff3 }, + { _MMIO(0x27ac), 0x0000fffe }, +}; + +static const struct i915_oa_reg mux_config_compute_extended[] = { + { _MMIO(0x2681c), 0x3eb00800 }, + { _MMIO(0x26820), 0x00900000 }, + { _MMIO(0x25384), 0x02aaaaaa }, + { _MMIO(0x25404), 0x03ffffff }, + { _MMIO(0x26800), 0x00142284 }, + { _MMIO(0x26808), 0x0e629062 }, + { _MMIO(0x2680c), 0x3f6f55cb }, + { _MMIO(0x26810), 0x00000014 }, + { _MMIO(0x26804), 0x00000000 }, + { _MMIO(0x26104), 0x02aaaaaa }, + { _MMIO(0x26184), 0x02aaaaaa }, + { _MMIO(0x25420), 0x00000000 }, + { _MMIO(0x25424), 0x00000000 }, + { _MMIO(0x2541c), 0x00000000 }, + { _MMIO(0x25428), 0x00000000 }, +}; + +static const struct i915_oa_reg * +get_compute_extended_mux_config(struct drm_i915_private *dev_priv, + int *len) +{ + *len = ARRAY_SIZE(mux_config_compute_extended); + return mux_config_compute_extended; +} + +static const struct i915_oa_reg b_counter_config_memory_reads[] = { + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x274c), 0x76543298 }, + { _MMIO(0x2748), 0x98989898 }, + { _MMIO(0x2744), 0x000000e4 }, + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x275c), 0x98a98a98 }, + { _MMIO(0x2758), 0x88888888 }, + { _MMIO(0x2754), 0x000c5500 }, + { _MMIO(0x2750), 0x00000000 }, + { _MMIO(0x2770), 0x0007f81a }, + { _MMIO(0x2774), 0x0000fc00 }, + { _MMIO(0x2778), 0x0007f82a }, + { _MMIO(0x277c), 0x0000fc00 }, + { _MMIO(0x2780), 0x0007f872 }, + { _MMIO(0x2784), 0x0000fc00 }, + { _MMIO(0x2788), 0x0007f8ba }, + { _MMIO(0x278c), 0x0000fc00 }, + { _MMIO(0x2790), 0x0007f87a }, + { _MMIO(0x2794), 0x0000fc00 }, + { _MMIO(0x2798), 0x0007f8ea }, + { _MMIO(0x279c), 0x0000fc00 }, + { _MMIO(0x27a0), 0x0007f8e2 }, + { _MMIO(0x27a4), 0x0000fc00 }, + { _MMIO(0x27a8), 0x0007f8f2 }, + { _MMIO(0x27ac), 0x0000fc00 }, +}; + +static const struct i915_oa_reg mux_config_memory_reads[] = { + { _MMIO(0x253a4), 0x34300000 }, + { _MMIO(0x25440), 0x2d800000 }, + { _MMIO(0x25444), 0x00000008 }, + { _MMIO(0x25128), 0x0e600000 }, + { _MMIO(0x25380), 0x00000450 }, + { _MMIO(0x25390), 0x00052c43 }, + { _MMIO(0x25384), 0x00000000 }, + { _MMIO(0x25400), 0x00006144 }, + { _MMIO(0x25408), 0x0a418820 }, + { _MMIO(0x2540c), 0x000820e6 }, + { _MMIO(0x25404), 0xff500000 }, + { _MMIO(0x25100), 0x000005d6 }, + { _MMIO(0x2510c), 0x0ef00000 }, + { _MMIO(0x25104), 0x00000000 }, + { _MMIO(0x25420), 0x02108421 }, + { _MMIO(0x25424), 0x00008421 }, + { _MMIO(0x2541c), 0x00000000 }, + { _MMIO(0x25428), 0x00000000 }, +}; + +static const struct i915_oa_reg * +get_memory_reads_mux_config(struct drm_i915_private *dev_priv, + int *len) +{ + *len = ARRAY_SIZE(mux_config_memory_reads); + return mux_config_memory_reads; +} + +static const struct i915_oa_reg b_counter_config_memory_writes[] = { + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x274c), 0x76543298 }, + { _MMIO(0x2748), 0x98989898 }, + { _MMIO(0x2744), 0x000000e4 }, + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x275c), 0xbabababa }, + { 
_MMIO(0x2758), 0x88888888 }, + { _MMIO(0x2754), 0x000c5500 }, + { _MMIO(0x2750), 0x00000000 }, + { _MMIO(0x2770), 0x0007f81a }, + { _MMIO(0x2774), 0x0000fc00 }, + { _MMIO(0x2778), 0x0007f82a }, + { _MMIO(0x277c), 0x0000fc00 }, + { _MMIO(0x2780), 0x0007f822 }, + { _MMIO(0x2784), 0x0000fc00 }, + { _MMIO(0x2788), 0x0007f8ba }, + { _MMIO(0x278c), 0x0000fc00 }, + { _MMIO(0x2790), 0x0007f87a }, + { _MMIO(0x2794), 0x0000fc00 }, + { _MMIO(0x2798), 0x0007f8ea }, + { _MMIO(0x279c), 0x0000fc00 }, + { _MMIO(0x27a0), 0x0007f8e2 }, + { _MMIO(0x27a4), 0x0000fc00 }, + { _MMIO(0x27a8), 0x0007f8f2 }, + { _MMIO(0x27ac), 0x0000fc00 }, +}; + +static const struct i915_oa_reg mux_config_memory_writes[] = { + { _MMIO(0x253a4), 0x34300000 }, + { _MMIO(0x25440), 0x01500000 }, + { _MMIO(0x25444), 0x00000120 }, + { _MMIO(0x25128), 0x0c200000 }, + { _MMIO(0x25380), 0x00000450 }, + { _MMIO(0x25390), 0x00052c43 }, + { _MMIO(0x25384), 0x00000000 }, + { _MMIO(0x25400), 0x00007184 }, + { _MMIO(0x25408), 0x0a418820 }, + { _MMIO(0x2540c), 0x000820e6 }, + { _MMIO(0x25404), 0xff500000 }, + { _MMIO(0x25100), 0x000005d6 }, + { _MMIO(0x2510c), 0x1e700000 }, + { _MMIO(0x25104), 0x00000000 }, + { _MMIO(0x25420), 0x02108421 }, + { _MMIO(0x25424), 0x00008421 }, + { _MMIO(0x2541c), 0x00000000 }, + { _MMIO(0x25428), 0x00000000 }, +}; + +static const struct i915_oa_reg * +get_memory_writes_mux_config(struct drm_i915_private *dev_priv, + int *len) +{ + *len = ARRAY_SIZE(mux_config_memory_writes); + return mux_config_memory_writes; +} + +static const struct i915_oa_reg b_counter_config_sampler_balance[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2744), 0x00800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2714), 0x00800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2724), 0x00800000 }, +}; + +static const struct i915_oa_reg mux_config_sampler_balance[] = { + { _MMIO(0x2eb9c), 0x01906400 }, + { _MMIO(0x2fb9c), 0x01906400 }, + { _MMIO(0x253a4), 0x00000000 }, + { _MMIO(0x26b9c), 0x01906400 }, + { _MMIO(0x27b9c), 0x01906400 }, + { _MMIO(0x27104), 0x00a00000 }, + { _MMIO(0x27184), 0x00a50000 }, + { _MMIO(0x2e804), 0x00500000 }, + { _MMIO(0x2e984), 0x00500000 }, + { _MMIO(0x2eb04), 0x00500000 }, + { _MMIO(0x2eb80), 0x00000084 }, + { _MMIO(0x2eb8c), 0x14200000 }, + { _MMIO(0x2eb84), 0x00000000 }, + { _MMIO(0x2f804), 0x00050000 }, + { _MMIO(0x2f984), 0x00050000 }, + { _MMIO(0x2fb04), 0x00050000 }, + { _MMIO(0x2fb80), 0x00000084 }, + { _MMIO(0x2fb8c), 0x00050800 }, + { _MMIO(0x2fb84), 0x00000000 }, + { _MMIO(0x25380), 0x00000010 }, + { _MMIO(0x2538c), 0x000000c0 }, + { _MMIO(0x25384), 0xaa550000 }, + { _MMIO(0x25404), 0xffffc000 }, + { _MMIO(0x26804), 0x50000000 }, + { _MMIO(0x26984), 0x50000000 }, + { _MMIO(0x26b04), 0x50000000 }, + { _MMIO(0x26b80), 0x00000084 }, + { _MMIO(0x26b90), 0x00050800 }, + { _MMIO(0x26b84), 0x00000000 }, + { _MMIO(0x27804), 0x05000000 }, + { _MMIO(0x27984), 0x05000000 }, + { _MMIO(0x27b04), 0x05000000 }, + { _MMIO(0x27b80), 0x00000084 }, + { _MMIO(0x27b90), 0x00000142 }, + { _MMIO(0x27b84), 0x00000000 }, + { _MMIO(0x26104), 0xa0000000 }, + { _MMIO(0x26184), 0xa5000000 }, + { _MMIO(0x25424), 0x00008620 }, + { _MMIO(0x2541c), 0x00000000 }, + { _MMIO(0x25428), 0x0004a54a }, +}; + +static const struct i915_oa_reg * +get_sampler_balance_mux_config(struct drm_i915_private *dev_priv, + int *len) +{ + *len = ARRAY_SIZE(mux_config_sampler_balance); + return mux_config_sampler_balance; +} + int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv) { dev_priv->perf.oa.mux_regs = NULL; @@ -139,6 +436,106 
@@ int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv) dev_priv->perf.oa.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic); + return 0; + case METRIC_SET_ID_COMPUTE_BASIC: + dev_priv->perf.oa.mux_regs = + get_compute_basic_mux_config(dev_priv, + &dev_priv->perf.oa.mux_regs_len); + if (!dev_priv->perf.oa.mux_regs) { + DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set"); + + /* EINVAL because *_register_sysfs already checked this + * and so it wouldn't have been advertised so userspace and + * so shouldn't have been requested + */ + return -EINVAL; + } + + dev_priv->perf.oa.b_counter_regs = + b_counter_config_compute_basic; + dev_priv->perf.oa.b_counter_regs_len = + ARRAY_SIZE(b_counter_config_compute_basic); + + return 0; + case METRIC_SET_ID_COMPUTE_EXTENDED: + dev_priv->perf.oa.mux_regs = + get_compute_extended_mux_config(dev_priv, + &dev_priv->perf.oa.mux_regs_len); + if (!dev_priv->perf.oa.mux_regs) { + DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set"); + + /* EINVAL because *_register_sysfs already checked this + * and so it wouldn't have been advertised so userspace and + * so shouldn't have been requested + */ + return -EINVAL; + } + + dev_priv->perf.oa.b_counter_regs = + b_counter_config_compute_extended; + dev_priv->perf.oa.b_counter_regs_len = + ARRAY_SIZE(b_counter_config_compute_extended); + + return 0; + case METRIC_SET_ID_MEMORY_READS: + dev_priv->perf.oa.mux_regs = + get_memory_reads_mux_config(dev_priv, + &dev_priv->perf.oa.mux_regs_len); + if (!dev_priv->perf.oa.mux_regs) { + DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set"); + + /* EINVAL because *_register_sysfs already checked this + * and so it wouldn't have been advertised so userspace and + * so shouldn't have been requested + */ + return -EINVAL; + } + + dev_priv->perf.oa.b_counter_regs = + b_counter_config_memory_reads; + dev_priv->perf.oa.b_counter_regs_len = + ARRAY_SIZE(b_counter_config_memory_reads); + + return 0; + case METRIC_SET_ID_MEMORY_WRITES: + dev_priv->perf.oa.mux_regs = + get_memory_writes_mux_config(dev_priv, + &dev_priv->perf.oa.mux_regs_len); + if (!dev_priv->perf.oa.mux_regs) { + DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set"); + + /* EINVAL because *_register_sysfs already checked this + * and so it wouldn't have been advertised so userspace and + * so shouldn't have been requested + */ + return -EINVAL; + } + + dev_priv->perf.oa.b_counter_regs = + b_counter_config_memory_writes; + dev_priv->perf.oa.b_counter_regs_len = + ARRAY_SIZE(b_counter_config_memory_writes); + + return 0; + case METRIC_SET_ID_SAMPLER_BALANCE: + dev_priv->perf.oa.mux_regs = + get_sampler_balance_mux_config(dev_priv, + &dev_priv->perf.oa.mux_regs_len); + if (!dev_priv->perf.oa.mux_regs) { + DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_BALANCE\" metric set"); + + /* EINVAL because *_register_sysfs already checked this + * and so it wouldn't have been advertised so userspace and + * so shouldn't have been requested + */ + return -EINVAL; + } + + dev_priv->perf.oa.b_counter_regs = + b_counter_config_sampler_balance; + dev_priv->perf.oa.b_counter_regs_len = + ARRAY_SIZE(b_counter_config_sampler_balance); + return 0; default: return -ENODEV; @@ -167,6 +564,116 @@ static struct attribute_group group_render_basic = { .attrs = attrs_render_basic, }; +static ssize_t +show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", 
METRIC_SET_ID_COMPUTE_BASIC); +} + +static struct device_attribute dev_attr_compute_basic_id = { + .attr = { .name = "id", .mode = 0444 }, + .show = show_compute_basic_id, + .store = NULL, +}; + +static struct attribute *attrs_compute_basic[] = { + &dev_attr_compute_basic_id.attr, + NULL, +}; + +static struct attribute_group group_compute_basic = { + .name = "39ad14bc-2380-45c4-91eb-fbcb3aa7ae7b", + .attrs = attrs_compute_basic, +}; + +static ssize_t +show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED); +} + +static struct device_attribute dev_attr_compute_extended_id = { + .attr = { .name = "id", .mode = 0444 }, + .show = show_compute_extended_id, + .store = NULL, +}; + +static struct attribute *attrs_compute_extended[] = { + &dev_attr_compute_extended_id.attr, + NULL, +}; + +static struct attribute_group group_compute_extended = { + .name = "3865be28-6982-49fe-9494-e4d1b4795413", + .attrs = attrs_compute_extended, +}; + +static ssize_t +show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS); +} + +static struct device_attribute dev_attr_memory_reads_id = { + .attr = { .name = "id", .mode = 0444 }, + .show = show_memory_reads_id, + .store = NULL, +}; + +static struct attribute *attrs_memory_reads[] = { + &dev_attr_memory_reads_id.attr, + NULL, +}; + +static struct attribute_group group_memory_reads = { + .name = "bb5ed49b-2497-4095-94f6-26ba294db88a", + .attrs = attrs_memory_reads, +}; + +static ssize_t +show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES); +} + +static struct device_attribute dev_attr_memory_writes_id = { + .attr = { .name = "id", .mode = 0444 }, + .show = show_memory_writes_id, + .store = NULL, +}; + +static struct attribute *attrs_memory_writes[] = { + &dev_attr_memory_writes_id.attr, + NULL, +}; + +static struct attribute_group group_memory_writes = { + .name = "3358d639-9b5f-45ab-976d-9b08cbfc6240", + .attrs = attrs_memory_writes, +}; + +static ssize_t +show_sampler_balance_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_BALANCE); +} + +static struct device_attribute dev_attr_sampler_balance_id = { + .attr = { .name = "id", .mode = 0444 }, + .show = show_sampler_balance_id, + .store = NULL, +}; + +static struct attribute *attrs_sampler_balance[] = { + &dev_attr_sampler_balance_id.attr, + NULL, +}; + +static struct attribute_group group_sampler_balance = { + .name = "bc274488-b4b6-40c7-90da-b77d7ad16189", + .attrs = attrs_sampler_balance, +}; + int i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv) { @@ -178,9 +685,49 @@ i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv) if (ret) goto error_render_basic; } + if (get_compute_basic_mux_config(dev_priv, &mux_len)) { + ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic); + if (ret) + goto error_compute_basic; + } + if (get_compute_extended_mux_config(dev_priv, &mux_len)) { + ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended); + if (ret) + goto error_compute_extended; + } + if (get_memory_reads_mux_config(dev_priv, &mux_len)) { + ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads); + if (ret) + goto error_memory_reads; + } + if (get_memory_writes_mux_config(dev_priv, 
&mux_len)) { + ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes); + if (ret) + goto error_memory_writes; + } + if (get_sampler_balance_mux_config(dev_priv, &mux_len)) { + ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_balance); + if (ret) + goto error_sampler_balance; + } return 0; +error_sampler_balance: + if (get_sampler_balance_mux_config(dev_priv, &mux_len)) + sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes); +error_memory_writes: + if (get_sampler_balance_mux_config(dev_priv, &mux_len)) + sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads); +error_memory_reads: + if (get_sampler_balance_mux_config(dev_priv, &mux_len)) + sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended); +error_compute_extended: + if (get_sampler_balance_mux_config(dev_priv, &mux_len)) + sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic); +error_compute_basic: + if (get_sampler_balance_mux_config(dev_priv, &mux_len)) + sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic); error_render_basic: return ret; } @@ -192,4 +739,14 @@ i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv) if (get_render_basic_mux_config(dev_priv, &mux_len)) sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic); + if (get_compute_basic_mux_config(dev_priv, &mux_len)) + sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic); + if (get_compute_extended_mux_config(dev_priv, &mux_len)) + sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended); + if (get_memory_reads_mux_config(dev_priv, &mux_len)) + sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads); + if (get_memory_writes_mux_config(dev_priv, &mux_len)) + sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes); + if (get_sampler_balance_mux_config(dev_priv, &mux_len)) + sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_balance); } -- cgit v1.2.3 From 7abbd8d670bb928366aa94332a173aa3d394ebfe Mon Sep 17 00:00:00 2001 From: Robert Bragg Date: Mon, 7 Nov 2016 19:49:57 +0000 Subject: drm/i915: Add a kerneldoc summary for i915_perf.c In particular this tries to capture for posterity some of the early challenges we had with using the core perf infrastructure in case we ever want to revisit adapting perf for device metrics. Cc: Chris Wilson Signed-off-by: Robert Bragg Reviewed-by: Matthew Auld Reviewed-by: Sourab Gupta Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161107194957.3385-12-robert@sixbynine.org --- drivers/gpu/drm/i915/i915_perf.c | 163 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 163 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 1a87fe967439..95512824922b 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -24,6 +24,169 @@ * Robert Bragg */ + +/** + * DOC: i915 Perf, streaming API for GPU metrics + * + * Gen graphics supports a large number of performance counters that can help + * driver and application developers understand and optimize their use of the + * GPU. + * + * This i915 perf interface enables userspace to configure and open a file + * descriptor representing a stream of GPU metrics which can then be read() as + * a stream of sample records. 
+ * + * The interface is particularly suited to exposing buffered metrics that are + * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU. + * + * Streams representing a single context are accessible to applications with a + * corresponding drm file descriptor, such that OpenGL can use the interface + * without special privileges. Access to system-wide metrics requires root + * privileges by default, unless changed via the dev.i915.perf_event_paranoid + * sysctl option. + * + * + * The interface was initially inspired by the core Perf infrastructure but + * some notable differences are: + * + * i915 perf file descriptors represent a "stream" instead of an "event"; where + * a perf event primarily corresponds to a single 64bit value, while a stream + * might sample sets of tightly-coupled counters, depending on the + * configuration. For example the Gen OA unit isn't designed to support + * orthogonal configurations of individual counters; it's configured for a set + * of related counters. Samples for an i915 perf stream capturing OA metrics + * will include a set of counter values packed in a compact HW specific format. + * The OA unit supports a number of different packing formats which can be + * selected by the user opening the stream. Perf has support for grouping + * events, but each event in the group is configured, validated and + * authenticated individually with separate system calls. + * + * i915 perf stream configurations are provided as an array of u64 (key,value) + * pairs, instead of a fixed struct with multiple miscellaneous config members, + * interleaved with event-type specific members. + * + * i915 perf doesn't support exposing metrics via an mmap'd circular buffer. + * The supported metrics are being written to memory by the GPU unsynchronized + * with the CPU, using HW specific packing formats for counter sets. Sometimes + * the constraints on HW configuration require reports to be filtered before it + * would be acceptable to expose them to unprivileged applications - to hide + * the metrics of other processes/contexts. For these use cases a read() based + * interface is a good fit, and provides an opportunity to filter data as it + * gets copied from the GPU mapped buffers to userspace buffers. + * + * + * Some notes regarding Linux Perf: + * -------------------------------- + * + * The first prototype of this driver was based on the core perf + * infrastructure, and while we did make that mostly work, with some changes to + * perf, we found we were breaking or working around too many assumptions baked + * into perf's currently cpu centric design. + * + * In the end we didn't see a clear benefit to making perf's implementation and + * interface more complex by changing design assumptions while we knew we still + * wouldn't be able to use any existing perf based userspace tools. + * + * Also considering the Gen specific nature of the Observability hardware and + * how userspace will sometimes need to combine i915 perf OA metrics with + * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're + * expecting the interface to be used by a platform specific userspace such as + * OpenGL or tools. This is to say; we aren't inherently missing out on having + * a standard vendor/architecture agnostic interface by not using perf. 
+ * + * + * For posterity, in case we might re-visit trying to adapt core perf to be + * better suited to exposing i915 metrics these were the main pain points we + * hit: + * + * - The perf based OA PMU driver broke some significant design assumptions: + * + * Existing perf pmus are used for profiling work on a cpu and we were + * introducing the idea of _IS_DEVICE pmus with different security + * implications, the need to fake cpu-related data (such as user/kernel + * registers) to fit with perf's current design, and adding _DEVICE records + * as a way to forward device-specific status records. + * + * The OA unit writes reports of counters into a circular buffer, without + * involvement from the CPU, making our PMU driver the first of a kind. + * + * Given the way we were periodically forward data from the GPU-mapped, OA + * buffer to perf's buffer, those bursts of sample writes looked to perf like + * we were sampling too fast and so we had to subvert its throttling checks. + * + * Perf supports groups of counters and allows those to be read via + * transactions internally but transactions currently seem designed to be + * explicitly initiated from the cpu (say in response to a userspace read()) + * and while we could pull a report out of the OA buffer we can't + * trigger a report from the cpu on demand. + * + * Related to being report based; the OA counters are configured in HW as a + * set while perf generally expects counter configurations to be orthogonal. + * Although counters can be associated with a group leader as they are + * opened, there's no clear precedent for being able to provide group-wide + * configuration attributes (for example we want to let userspace choose the + * OA unit report format used to capture all counters in a set, or specify a + * GPU context to filter metrics on). We avoided using perf's grouping + * feature and forwarded OA reports to userspace via perf's 'raw' sample + * field. This suited our userspace well considering how coupled the counters + * are when dealing with normalizing. It would be inconvenient to split + * counters up into separate events, only to require userspace to recombine + * them. For Mesa it's also convenient to be forwarded raw, periodic reports + * for combining with the side-band raw reports it captures using + * MI_REPORT_PERF_COUNT commands. + * + * _ As a side note on perf's grouping feature; there was also some concern + * that using PERF_FORMAT_GROUP as a way to pack together counter values + * would quite drastically inflate our sample sizes, which would likely + * lower the effective sampling resolutions we could use when the available + * memory bandwidth is limited. + * + * With the OA unit's report formats, counters are packed together as 32 + * or 40bit values, with the largest report size being 256 bytes. + * + * PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a + * documented ordering to the values, implying PERF_FORMAT_ID must also be + * used to add a 64bit ID before each value; giving 16 bytes per counter. + * + * Related to counter orthogonality; we can't time share the OA unit, while + * event scheduling is a central design idea within perf for allowing + * userspace to open + enable more events than can be configured in HW at any + * one time. The OA unit is not designed to allow re-configuration while in + * use. We can't reconfigure the OA unit without losing internal OA unit + * state which we can't access explicitly to save and restore. 
Reconfiguring + * the OA unit is also relatively slow, involving ~100 register writes. From + * userspace Mesa also depends on a stable OA configuration when emitting + * MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be + * disabled while there are outstanding MI_RPC commands lest we hang the + * command streamer. + * + * The contents of sample records aren't extensible by device drivers (i.e. + * the sample_type bits). As an example; Sourab Gupta had been looking to + * attach GPU timestamps to our OA samples. We were shoehorning OA reports + * into sample records by using the 'raw' field, but it's tricky to pack more + * than one thing into this field because events/core.c currently only lets a + * pmu give a single raw data pointer plus len which will be copied into the + * ring buffer. To include more than the OA report we'd have to copy the + * report into an intermediate larger buffer. I'd been considering allowing a + * vector of data+len values to be specified for copying the raw data, but + * it felt like a kludge to being using the raw field for this purpose. + * + * - It felt like our perf based PMU was making some technical compromises + * just for the sake of using perf: + * + * perf_event_open() requires events to either relate to a pid or a specific + * cpu core, while our device pmu related to neither. Events opened with a + * pid will be automatically enabled/disabled according to the scheduling of + * that process - so not appropriate for us. When an event is related to a + * cpu id, perf ensures pmu methods will be invoked via an inter process + * interrupt on that core. To avoid invasive changes our userspace opened OA + * perf events for a specific cpu. This was workable but it meant the + * majority of the OA driver ran in atomic context, including all OA report + * forwarding, which wasn't really necessary in our case and seems to make + * our locking requirements somewhat complex as we handled the interaction + * with the rest of the i915 driver. + */ + #include #include -- cgit v1.2.3 From d9e9da64c4a762493c5ac24b3b2bbc43c2754786 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 22 Nov 2016 14:41:18 +0000 Subject: drm/i915: Don't deref context->file_priv ERR_PTR upon reset When a user context is closed, it's file_priv backpointer is replaced by ERR_PTR(-EBADF); be careful not to chase this invalid pointer after a hang and a GPU reset. 
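For context, a minimal sketch of the guard this fix relies on (illustrative only; it mirrors the hunk below rather than adding anything new): a plain NULL test still lets the ERR_PTR(-EBADF) value through, so the dereference has to be gated on IS_ERR_OR_NULL():

        if (!ctx->banned || IS_ERR_OR_NULL(ctx->file_priv))
                return;         /* covers both NULL and ERR_PTR(-EBADF) */

        ctx->file_priv->context_bans++;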
Signed-off-by: Chris Wilson Fixes: b083a0870c79 ("drm/i915: Add per client max context ban limit") Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: http://patchwork.freedesktop.org/patch/msgid/20161122144121.7379-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b38d4f8d1875..b0dd0f0ea65a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2647,16 +2647,12 @@ static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) ctx->name, ctx->ban_score, yesno(ctx->banned)); - if (!ctx->file_priv) + if (!ctx->banned || IS_ERR_OR_NULL(ctx->file_priv)) return; - if (ctx->banned) { - ctx->file_priv->context_bans++; - - DRM_DEBUG_DRIVER("client %s has has %d context banned\n", - ctx->name, - ctx->file_priv->context_bans); - } + ctx->file_priv->context_bans++; + DRM_DEBUG_DRIVER("client %s has had %d context banned\n", + ctx->name, ctx->file_priv->context_bans); } static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) -- cgit v1.2.3 From c2a126a46d4c843e9e56b7cad6a14b3791ffab47 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 22 Nov 2016 14:41:19 +0000 Subject: drm/i915: Disable hangcheck when wedged If the gpu reset fails and the machine is terminally wedged, further hangchecks achieve nothing but noise. Disable them, with a corollary that we re-enable hangchecking after a successful GPU reset in case the user is artificially bringing the machine back to life through the debug interface. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: http://patchwork.freedesktop.org/patch/msgid/20161122144121.7379-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.c | 2 ++ drivers/gpu/drm/i915/intel_hangcheck.c | 3 +++ 2 files changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ff13503eae08..b893e67b4897 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1822,6 +1822,8 @@ void i915_reset(struct drm_i915_private *dev_priv) goto error; } + i915_queue_hangcheck(dev_priv); + wakeup: wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS); return; diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index c03db022a6d8..f05971f5586f 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -434,6 +434,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (!READ_ONCE(dev_priv->gt.awake)) return; + if (i915_terminally_wedged(&dev_priv->gpu_error)) + return; + /* As enabling the GPU requires fairly extensive mmio access, * periodically arm the mmio checker to see if we are triggering * any invalid access. -- cgit v1.2.3 From 3dcf93f7f23a61e867a5ccadaf651cb2d29229fd Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 22 Nov 2016 14:41:20 +0000 Subject: drm/i915: Complete requests in nop_submit_request Since the submit/execute split in commit d55ac5bf97c6 ("drm/i915: Defer transfer onto execution timeline to actual hw submission") the global seqno advance was deferred until the submit_request callback. After wedging the GPU, we were installing a nop_submit_request handler (to avoid waking up the dead hw) but I had missed converting this over to the new scheme. 
Under the new scheme, we have to explicitly call i915_gem_submit_request() from the submit_request handler to mark the request as on the hardware. If we don't the request is always pending, and any waiter will continue to wait indefinitely and hangcheck will not be able to resolve the lockup. References: https://bugs.freedesktop.org/show_bug.cgi?id=98748 Testcase: igt/gem_eio/in-flight Fixes: d55ac5bf97c6 ("drm/i915: Defer transfer onto execution timeline to actual hw submission") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161122144121.7379-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b0dd0f0ea65a..91f1ac30c5fa 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2781,6 +2781,8 @@ void i915_gem_reset(struct drm_i915_private *dev_priv) static void nop_submit_request(struct drm_i915_gem_request *request) { + i915_gem_request_submit(request); + intel_engine_init_global_seqno(request->engine, request->global_seqno); } static void i915_gem_cleanup_engine(struct intel_engine_cs *engine) -- cgit v1.2.3 From 20e4933c478a1ca694b38fa4ac44d99e659941f5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 22 Nov 2016 14:41:21 +0000 Subject: drm/i915: Stop the machine as we install the wedged submit_request handler In order to prevent a race between the old callback submitting an incomplete request and i915_gem_set_wedged() installing its nop handler, we must ensure that the swap occurs when the machine is idle (stop_machine). v2: move context lost from out of BKL. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: http://patchwork.freedesktop.org/patch/msgid/20161122144121.7379-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 91f1ac30c5fa..3d4e07e9734f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -2787,6 +2788,12 @@ static void nop_submit_request(struct drm_i915_gem_request *request) static void i915_gem_cleanup_engine(struct intel_engine_cs *engine) { + /* We need to be sure that no thread is running the old callback as + * we install the nop handler (otherwise we would submit a request + * to hardware that will never complete). In order to prevent this + * race, we wait until the machine is idle before making the swap + * (using stop_machine()). 
+ */ engine->submit_request = nop_submit_request; /* Mark all pending requests as complete so that any concurrent @@ -2817,20 +2824,29 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine) } } -void i915_gem_set_wedged(struct drm_i915_private *dev_priv) +static int __i915_gem_set_wedged_BKL(void *data) { + struct drm_i915_private *i915 = data; struct intel_engine_cs *engine; enum intel_engine_id id; + for_each_engine(engine, i915, id) + i915_gem_cleanup_engine(engine); + + return 0; +} + +void i915_gem_set_wedged(struct drm_i915_private *dev_priv) +{ lockdep_assert_held(&dev_priv->drm.struct_mutex); set_bit(I915_WEDGED, &dev_priv->gpu_error.flags); - i915_gem_context_lost(dev_priv); - for_each_engine(engine, dev_priv, id) - i915_gem_cleanup_engine(engine); - mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); + stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL); + i915_gem_context_lost(dev_priv); i915_gem_retire_requests(dev_priv); + + mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); } static void -- cgit v1.2.3 From f2b667b658f998d4bba12ca0204c01f34fdb0321 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 21 Nov 2016 21:15:04 +0200 Subject: drm/i915/lspcon: Ensure AUX CH is awake while in DP Sleep state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some LSPCON adaptors won't properly wake up in response to an AUX request after the adaptor was placed to a DP Sink Sleep state (via writing 0x2 to DP_SET_POWER). Based on the DP 1.4 specification 5.2.5, the sink may place the AUX CH into a low-power state while in Sleep state, but should wake it up in response to an AUX request within 1-20ms (answering with AUX defers while waking it up). As opposed to this at least the ParadTech PS175 adaptor won't fully wake in response to the first I2C-over-AUX access and will occasionally ignore the offset in I2C messages. This can result in accessing the DDC register at offset 0 regardless of the specified offset and the LSPCON detection failing. To fix this do an initial dummy read from the DPCD area. The PS175 will defer this access until it's fully woken (taking ~150ms) making sure the following I2C-over-AUX accesses will work correctly. 
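As a rough illustration of the wake-up read described above (a sketch only; the real helper, lspcon_wake_native_aux_ch(), is added in the diff below), any DPCD access serves as the dummy read since the adaptor defers it until its AUX CH is awake:

        u8 rev;

        /* Dummy DPCD read; the PS175 defers it (~150ms) until fully awake. */
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPCD_REV, &rev) != 1)
                DRM_DEBUG_KMS("Native AUX CH down\n");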
Cc: Shashank Sharma Cc: Ville Syrjälä Cc: Jani Nikula Reference: https://bugs.freedesktop.org/show_bug.cgi?id=98353 Signed-off-by: Imre Deak Reviewed-by: Shashank Sharma Link: http://patchwork.freedesktop.org/patch/msgid/1479755707-29596-2-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_lspcon.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index daa523410953..50131249d0bf 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -77,11 +77,29 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon, return 0; } +static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon) +{ + uint8_t rev; + + if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV, + &rev) != 1) { + DRM_DEBUG_KMS("Native AUX CH down\n"); + return false; + } + + DRM_DEBUG_KMS("Native AUX CH up, DPCD version: %d.%d\n", + rev >> 4, rev & 0xf); + + return true; +} + static bool lspcon_probe(struct intel_lspcon *lspcon) { enum drm_dp_dual_mode_type adaptor_type; struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc; + lspcon_wake_native_aux_ch(lspcon); + /* Lets probe the adaptor and check its type */ adaptor_type = drm_dp_dual_mode_detect(adapter); if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) { @@ -132,7 +150,8 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) void lspcon_resume(struct intel_lspcon *lspcon) { - lspcon_resume_in_pcon_wa(lspcon); + if (lspcon_wake_native_aux_ch(lspcon)) + lspcon_resume_in_pcon_wa(lspcon); if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true)) DRM_ERROR("LSPCON resume failed\n"); -- cgit v1.2.3 From dd75f6dd2e7d5de544e8aa8b75d1e50308cd8561 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 21 Nov 2016 21:15:05 +0200 Subject: drm/i915/lspcon: Add dp_to_lspcon helper() We need to get to LSPCON in the next patch, so factor out the helper for it. While at it also remove the redundant GEN9 check. 
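For reference, a sketch of how a caller is expected to use the new helper (hypothetical call site; the helper itself and its first user are in the diff below):

        struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

        if (lspcon->active)
                lspcon_resume(lspcon);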
Cc: Shashank Sharma Signed-off-by: Imre Deak Reviewed-by: Shashank Sharma Link: http://patchwork.freedesktop.org/patch/msgid/1479755707-29596-3-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_dp.c | 7 +++---- drivers/gpu/drm/i915/intel_drv.h | 6 ++++++ 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 90283edcafba..16c19d789c66 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -4753,14 +4753,13 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) void intel_dp_encoder_reset(struct drm_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->dev); - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - struct intel_lspcon *lspcon = &intel_dig_port->lspcon; - struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); if (!HAS_DDI(dev_priv)) intel_dp->DP = I915_READ(intel_dp->output_reg); - if (IS_GEN9(dev_priv) && lspcon->active) + if (lspcon->active) lspcon_resume(lspcon); if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cd132c216a67..cf47e8a6c2fb 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1089,6 +1089,12 @@ dp_to_dig_port(struct intel_dp *intel_dp) return container_of(intel_dp, struct intel_digital_port, dp); } +static inline struct intel_lspcon * +dp_to_lspcon(struct intel_dp *intel_dp) +{ + return &dp_to_dig_port(intel_dp)->lspcon; +} + static inline struct intel_digital_port * hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) { -- cgit v1.2.3 From 357c0ae9198ad7f526f229c4f33921e8e3a3e9a0 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 21 Nov 2016 21:15:06 +0200 Subject: drm/i915/lspcon: Wait for expected LSPCON mode to settle MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some LSPCON adaptors may return an incorrect LSPCON mode right after waking from DP Sleep state. This is the case at least for the ParadTech PS175 adaptor, both when waking because of exiting the DP Sleep to active state, or due to any other AUX CH transfer. We can determine the current expected mode based on whether the DPCD area is accessible, since according to the LSPCON spec this area is only accesible in PCON mode. This wait will avoid us trying to change the mode, while the current expected mode hasn't settled yet and start link training before the adaptor thinks it's in PCON mode after waking from DP Sleep state. 
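A minimal sketch of the settle-wait described above (illustrative only; the actual implementation is lspcon_wait_mode() in the diff below). Since the DPCD area is only reachable in PCON mode, the result of the wake-up read determines which mode to wait for:

        /* A successful native AUX read implies the adaptor should be in PCON mode. */
        expected_mode = lspcon_wake_native_aux_ch(lspcon) ?
                        DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS;

        lspcon_wait_mode(lspcon, expected_mode);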
Cc: Shashank Sharma Cc: Ville Syrjälä Cc: Jani Nikula Signed-off-by: Imre Deak Reviewed-by: Shashank Sharma Link: http://patchwork.freedesktop.org/patch/msgid/1479755707-29596-4-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_dp.c | 5 +++ drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_lspcon.c | 70 ++++++++++++++++++++++++++++++++----- 3 files changed, 68 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 16c19d789c66..8026a83fec9e 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -2401,6 +2401,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D3); } else { + struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); + /* * When turning on, we need to retry for 1ms to give the sink * time to wake up. @@ -2412,6 +2414,9 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) break; msleep(1); } + + if (ret == 1 && lspcon->active) + lspcon_wait_pcon_mode(lspcon); } if (ret != 1) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cf47e8a6c2fb..d16590481ce4 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1830,4 +1830,5 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state); /* intel_lspcon.c */ bool lspcon_init(struct intel_digital_port *intel_dig_port); void lspcon_resume(struct intel_lspcon *lspcon); +void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 50131249d0bf..281127d9f495 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -35,16 +35,54 @@ static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon) return &dig_port->dp; } +static const char *lspcon_mode_name(enum drm_lspcon_mode mode) +{ + switch (mode) { + case DRM_LSPCON_MODE_PCON: + return "PCON"; + case DRM_LSPCON_MODE_LS: + return "LS"; + case DRM_LSPCON_MODE_INVALID: + return "INVALID"; + default: + MISSING_CASE(mode); + return "INVALID"; + } +} + static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) { - enum drm_lspcon_mode current_mode = DRM_LSPCON_MODE_INVALID; + enum drm_lspcon_mode current_mode; struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc; - if (drm_lspcon_get_mode(adapter, ¤t_mode)) + if (drm_lspcon_get_mode(adapter, ¤t_mode)) { DRM_ERROR("Error reading LSPCON mode\n"); - else - DRM_DEBUG_KMS("Current LSPCON mode %s\n", - current_mode == DRM_LSPCON_MODE_PCON ? 
"PCON" : "LS"); + return DRM_LSPCON_MODE_INVALID; + } + return current_mode; +} + +static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, + enum drm_lspcon_mode mode) +{ + enum drm_lspcon_mode current_mode; + + current_mode = lspcon_get_current_mode(lspcon); + if (current_mode == mode || current_mode == DRM_LSPCON_MODE_INVALID) + goto out; + + DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", + lspcon_mode_name(mode)); + + wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode || + current_mode == DRM_LSPCON_MODE_INVALID, 100); + if (current_mode != mode) + DRM_DEBUG_KMS("LSPCON mode hasn't settled\n"); + +out: + DRM_DEBUG_KMS("Current LSPCON mode %s\n", + lspcon_mode_name(current_mode)); + return current_mode; } @@ -97,8 +135,10 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) { enum drm_dp_dual_mode_type adaptor_type; struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc; + enum drm_lspcon_mode expected_mode; - lspcon_wake_native_aux_ch(lspcon); + expected_mode = lspcon_wake_native_aux_ch(lspcon) ? + DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS; /* Lets probe the adaptor and check its type */ adaptor_type = drm_dp_dual_mode_detect(adapter); @@ -110,7 +150,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) /* Yay ... got a LSPCON device */ DRM_DEBUG_KMS("LSPCON detected\n"); - lspcon->mode = lspcon_get_current_mode(lspcon); + lspcon->mode = lspcon_wait_mode(lspcon, expected_mode); lspcon->active = true; return true; } @@ -150,8 +190,17 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) void lspcon_resume(struct intel_lspcon *lspcon) { - if (lspcon_wake_native_aux_ch(lspcon)) + enum drm_lspcon_mode expected_mode; + + if (lspcon_wake_native_aux_ch(lspcon)) { + expected_mode = DRM_LSPCON_MODE_PCON; lspcon_resume_in_pcon_wa(lspcon); + } else { + expected_mode = DRM_LSPCON_MODE_LS; + } + + if (lspcon_wait_mode(lspcon, expected_mode) == DRM_LSPCON_MODE_PCON) + return; if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true)) DRM_ERROR("LSPCON resume failed\n"); @@ -159,6 +208,11 @@ void lspcon_resume(struct intel_lspcon *lspcon) DRM_DEBUG_KMS("LSPCON resume success\n"); } +void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon) +{ + lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON); +} + bool lspcon_init(struct intel_digital_port *intel_dig_port) { struct intel_dp *dp = &intel_dig_port->dp; -- cgit v1.2.3 From 902488004658ca3d8ee023875e3ac3a7965a8bc9 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 21 Nov 2016 21:15:07 +0200 Subject: drm/i915/lspcon: Remove unused force change mode parameter All callers asked for a forced change but the function ignored this parameter. It doesn't seem to be necessary to force the change in any case so let's just remove the parameter. 
Cc: Shashank Sharma Signed-off-by: Imre Deak Reviewed-by: Shashank Sharma Link: http://patchwork.freedesktop.org/patch/msgid/1479755707-29596-5-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_lspcon.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 281127d9f495..f6d4e6940257 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -87,7 +87,7 @@ out: } static int lspcon_change_mode(struct intel_lspcon *lspcon, - enum drm_lspcon_mode mode, bool force) + enum drm_lspcon_mode mode) { int err; enum drm_lspcon_mode current_mode; @@ -202,7 +202,7 @@ void lspcon_resume(struct intel_lspcon *lspcon) if (lspcon_wait_mode(lspcon, expected_mode) == DRM_LSPCON_MODE_PCON) return; - if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true)) + if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON)) DRM_ERROR("LSPCON resume failed\n"); else DRM_DEBUG_KMS("LSPCON resume success\n"); @@ -239,8 +239,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port) * 2.0 sinks. */ if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) { - if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, - true) < 0) { + if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) { DRM_ERROR("LSPCON mode change to PCON failed\n"); return false; } -- cgit v1.2.3 From e853ab7ca3f6c88cbef18fbc6f6ba6a031d9ac14 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Wed, 23 Nov 2016 10:20:21 +0000 Subject: drm/i915/guc: Remove unused intel_guc_fw struct member Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1479896421-20611-1-git-send-email-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/intel_guc.h | 1 - drivers/gpu/drm/i915/intel_guc_loader.c | 1 - 2 files changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 0053258e03d3..02337a81abc2 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -103,7 +103,6 @@ enum intel_guc_fw_status { * of fetching, caching, and loading the firmware image into the GuC. */ struct intel_guc_fw { - struct drm_device * guc_dev; const char * guc_fw_path; size_t guc_fw_size; struct drm_i915_gem_object * guc_fw_obj; diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 34d6ad2cf7c1..e2d0bdaee0c8 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -754,7 +754,6 @@ void intel_guc_init(struct drm_device *dev) fw_path = ""; /* unknown device */ } - guc_fw->guc_dev = dev; guc_fw->guc_fw_path = fw_path; guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE; -- cgit v1.2.3 From 793b61ea9f9972be0836a9a15d0d6a520daaf124 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Wed, 23 Nov 2016 10:49:15 +0000 Subject: drm/i915: i915_gem_alloc_context_obj can be static It has only one call site from the same file. v2: Also rename it to alloc_context_obj. 
(Chris Wilson) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Cc: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1479898155-21014-1-git-send-email-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/i915_gem_context.c | 7 +++---- 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 970e50bf9884..c44f2410f10f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3342,8 +3342,6 @@ struct i915_vma * i915_gem_context_pin_legacy(struct i915_gem_context *ctx, unsigned int flags); void i915_gem_context_free(struct kref *ctx_ref); -struct drm_i915_gem_object * -i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); struct i915_gem_context * i915_gem_context_create_gvt(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index f82936a2fcce..a6add0c14045 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -166,8 +166,8 @@ void i915_gem_context_free(struct kref *ctx_ref) kfree(ctx); } -struct drm_i915_gem_object * -i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) +static struct drm_i915_gem_object * +alloc_context_obj(struct drm_device *dev, u64 size) { struct drm_i915_gem_object *obj; int ret; @@ -286,8 +286,7 @@ __create_hw_context(struct drm_device *dev, struct drm_i915_gem_object *obj; struct i915_vma *vma; - obj = i915_gem_alloc_context_obj(dev, - dev_priv->hw_context_size); + obj = alloc_context_obj(dev, dev_priv->hw_context_size); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto err_out; -- cgit v1.2.3 From d9348dec902ff36e0f1b25ccf1f4be25fc1ac409 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 22 Nov 2016 22:21:53 +0200 Subject: drm/i915: Make skl_write_{plane,cursor}_wm() static MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Someone forgot to make skl_write_{plane,cursor}_wm() static when removing the prototypes from the header. Sparse isn't pleased. 
Cc: Maarten Lankhorst Cc: Lyude Cc: Matt Roper Fixes: e62929b3f628 ("drm/i915/gen9+: Program watermarks as a separate step during evasion, v3.") Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1479846113-24745-1-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Lyude Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_pm.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index e207dc69e8b3..cbd0f3269b2d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3851,10 +3851,10 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv, I915_WRITE(reg, val); } -void skl_write_plane_wm(struct intel_crtc *intel_crtc, - const struct skl_plane_wm *wm, - const struct skl_ddb_allocation *ddb, - int plane) +static void skl_write_plane_wm(struct intel_crtc *intel_crtc, + const struct skl_plane_wm *wm, + const struct skl_ddb_allocation *ddb, + int plane) { struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; @@ -3875,9 +3875,9 @@ void skl_write_plane_wm(struct intel_crtc *intel_crtc, &ddb->y_plane[pipe][plane]); } -void skl_write_cursor_wm(struct intel_crtc *intel_crtc, - const struct skl_plane_wm *wm, - const struct skl_ddb_allocation *ddb) +static void skl_write_cursor_wm(struct intel_crtc *intel_crtc, + const struct skl_plane_wm *wm, + const struct skl_ddb_allocation *ddb) { struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; -- cgit v1.2.3 From b14e5848c01de9a297d8fa2999ad324d45afb536 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 22 Nov 2016 18:01:56 +0200 Subject: drm/i915: Add per-pipe plane identifier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As I told people in [1] we really should not be confusing enum plane as a per-pipe plane identifier. Looks like that happened nonetheless, so let's fix it up by splitting the two into two enums. We'll also want something we just directly pass to various register offset macros and whatnot on SKL+. So let's make this new thing work for that. Currently we pass intel_plane->plane for the "sprites" and just a hardcoded zero for the "primary" planes. We want to get rid of that hardocoding so that we can share the same code for all planes (apart from the legacy cursor of course). [1] https://lists.freedesktop.org/archives/intel-gfx/2015-September/076082.html Cc: Matt Roper Cc: Daniel Vetter Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Link: http://patchwork.freedesktop.org/patch/msgid/1479830524-7882-2-git-send-email-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.h | 28 +++++++++++++++++++++------- drivers/gpu/drm/i915/intel_display.c | 2 ++ drivers/gpu/drm/i915/intel_drv.h | 3 ++- drivers/gpu/drm/i915/intel_sprite.c | 1 + 4 files changed, 26 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c44f2410f10f..b12296ef0b4f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -180,22 +180,36 @@ static inline bool transcoder_is_dsi(enum transcoder transcoder) } /* - * I915_MAX_PLANES in the enum below is the maximum (across all platforms) - * number of planes per CRTC. 
Not all platforms really have this many planes, - * which means some arrays of size I915_MAX_PLANES may have unused entries - * between the topmost sprite plane and the cursor plane. + * Global legacy plane identifier. Valid only for primary/sprite + * planes on pre-g4x, and only for primary planes on g4x+. */ enum plane { - PLANE_A = 0, + PLANE_A, PLANE_B, PLANE_C, - PLANE_CURSOR, - I915_MAX_PLANES, }; #define plane_name(p) ((p) + 'A') #define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A') +/* + * Per-pipe plane identifier. + * I915_MAX_PLANES in the enum below is the maximum (across all platforms) + * number of planes per CRTC. Not all platforms really have this many planes, + * which means some arrays of size I915_MAX_PLANES may have unused entries + * between the topmost sprite plane and the cursor plane. + * + * This is expected to be passed to various register macros + * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care. + */ +enum plane_id { + PLANE_PRIMARY, + PLANE_SPRITE0, + PLANE_SPRITE1, + PLANE_CURSOR, + I915_MAX_PLANES, +}; + enum port { PORT_NONE = -1, PORT_A = 0, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8d270f7650de..42442b54b355 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14988,6 +14988,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->plane = (enum plane) !pipe; else primary->plane = (enum plane) pipe; + primary->id = PLANE_PRIMARY; primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); primary->check_plane = intel_check_primary_plane; @@ -15187,6 +15188,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) cursor->max_downscale = 1; cursor->pipe = pipe; cursor->plane = pipe; + cursor->id = PLANE_CURSOR; cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe); cursor->check_plane = intel_check_cursor_plane; cursor->update_plane = intel_update_cursor_plane; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d16590481ce4..c17eebbdb7a2 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -766,7 +766,8 @@ struct intel_plane_wm_parameters { struct intel_plane { struct drm_plane base; - int plane; + u8 plane; + enum plane_id id; enum pipe pipe; bool can_scale; int max_downscale; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index c2b629c922e7..bc86881b0484 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -1112,6 +1112,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->pipe = pipe; intel_plane->plane = plane; + intel_plane->id = PLANE_SPRITE0 + plane; intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane); intel_plane->check_plane = intel_check_sprite_plane; -- cgit v1.2.3 From d97d7b48b644378349f2566dd83739e0be3ba410 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 22 Nov 2016 18:01:57 +0200 Subject: drm/i915: Add crtc->plane_ids_mask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a mask of which planes are available for each pipe. This doesn't quite work for old platforms with dynamic plane<->pipe assignment, but as we don't support that sort of stuff (yet) we can get away with it. The main use I have for this is the for_each_plane_id_on_crtc() macro for iterating over all possible planes on the crtc. 
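For illustration, the kind of loop this enables (a sketch only; the macro definition and its first users appear in the diffs below):

        enum plane_id plane_id;

        for_each_plane_id_on_crtc(crtc, plane_id) {
                if (plane_id == PLANE_CURSOR)
                        continue;
                /* plane_id can be passed straight to PLANE_CTL(), PLANE_BUF_CFG(), etc. */
        }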
I suppose we could not add the mask, and instead iterate by comparing intel_plane->pipe but then we'd need a local intel_plane variable which is just unnecessary clutter in some cases. But I'm not hung up on this, so if people prefer the other option I could be convinced to use it. v2: Use BIT() in the iterator macro too (Paulo) Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Link: http://patchwork.freedesktop.org/patch/msgid/1479830524-7882-3-git-send-email-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.h | 4 ++++ drivers/gpu/drm/i915/intel_display.c | 3 +++ drivers/gpu/drm/i915/intel_drv.h | 3 ++- 3 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b12296ef0b4f..239d88cf637c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -210,6 +210,10 @@ enum plane_id { I915_MAX_PLANES, }; +#define for_each_plane_id_on_crtc(__crtc, __p) \ + for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \ + for_each_if ((__crtc)->plane_ids_mask & BIT(__p)) + enum port { PORT_NONE = -1, PORT_A = 0, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 42442b54b355..af836720cccd 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -15277,6 +15277,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) ret = PTR_ERR(primary); goto fail; } + intel_crtc->plane_ids_mask |= BIT(primary->id); for_each_sprite(dev_priv, pipe, sprite) { struct intel_plane *plane; @@ -15286,6 +15287,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) ret = PTR_ERR(plane); goto fail; } + intel_crtc->plane_ids_mask |= BIT(plane->id); } cursor = intel_cursor_plane_create(dev_priv, pipe); @@ -15293,6 +15295,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) ret = PTR_ERR(cursor); goto fail; } + intel_crtc->plane_ids_mask |= BIT(cursor->id); ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, &primary->base, &cursor->base, diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index c17eebbdb7a2..beafff253615 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -691,8 +691,9 @@ struct intel_crtc { * some outputs connected to this crtc. */ bool active; - unsigned long enabled_power_domains; bool lowfreq_avail; + u8 plane_ids_mask; + unsigned long enabled_power_domains; struct intel_overlay *overlay; struct intel_flip_work *flip_work; -- cgit v1.2.3 From d5cdfdf54ea5bcc454a2804301ae5342db0ff0c3 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 22 Nov 2016 18:01:58 +0200 Subject: drm/i915: Use enum plane_id in SKL wm code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Nuke skl_wm_plane_id() and just use the new intel_plane->id. 
v2: Convert skl_write_plane_wm() as well v3: Convert skl_pipe_wm_get_hw_state() correctly v4: Rebase due to changes in the wm code Drop the cursor FIXME from the total data rate calc (Paulo) Use the "[PLANE:%d:%s]" format in debug print (Paulo) Cc: Matt Roper Cc: Paulo Zanoni Cc: Maarten Lankhorst Cc: Lyude Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Link: http://patchwork.freedesktop.org/patch/msgid/1479830524-7882-4-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Lyude --- drivers/gpu/drm/i915/intel_pm.c | 180 +++++++++++++++++----------------------- 1 file changed, 77 insertions(+), 103 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index cbd0f3269b2d..5b7d306a1947 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2866,28 +2866,6 @@ bool ilk_disable_lp_wm(struct drm_device *dev) #define SKL_SAGV_BLOCK_TIME 30 /* µs */ -/* - * Return the index of a plane in the SKL DDB and wm result arrays. Primary - * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and - * other universal planes are in indices 1..n. Note that this may leave unused - * indices between the top "sprite" plane and the cursor. - */ -static int -skl_wm_plane_id(const struct intel_plane *plane) -{ - switch (plane->base.type) { - case DRM_PLANE_TYPE_PRIMARY: - return 0; - case DRM_PLANE_TYPE_CURSOR: - return PLANE_CURSOR; - case DRM_PLANE_TYPE_OVERLAY: - return plane->plane + 1; - default: - MISSING_CASE(plane->base.type); - return plane->plane; - } -} - /* * FIXME: We still don't have the proper code detect if we need to apply the WA, * so assume we'll always need it in order to avoid underruns. @@ -3026,7 +3004,6 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) struct intel_crtc *crtc; struct intel_plane *plane; struct intel_crtc_state *cstate; - struct skl_plane_wm *wm; enum pipe pipe; int level, latency; @@ -3053,7 +3030,8 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) return false; for_each_intel_plane_on_crtc(dev, crtc, plane) { - wm = &cstate->wm.skl.optimal.planes[skl_wm_plane_id(plane)]; + struct skl_plane_wm *wm = + &cstate->wm.skl.optimal.planes[plane->id]; /* Skip this plane if it's not enabled */ if (!wm->wm[0].plane_en) @@ -3156,28 +3134,29 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb /* out */) { - enum pipe pipe; - int plane; - u32 val; + struct intel_crtc *crtc; memset(ddb, 0, sizeof(*ddb)); - for_each_pipe(dev_priv, pipe) { + for_each_intel_crtc(&dev_priv->drm, crtc) { enum intel_display_power_domain power_domain; + enum plane_id plane_id; + enum pipe pipe = crtc->pipe; power_domain = POWER_DOMAIN_PIPE(pipe); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) continue; - for_each_universal_plane(dev_priv, pipe, plane) { - val = I915_READ(PLANE_BUF_CFG(pipe, plane)); - skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane], - val); - } + for_each_plane_id_on_crtc(crtc, plane_id) { + u32 val; + + if (plane_id != PLANE_CURSOR) + val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); + else + val = I915_READ(CUR_BUF_CFG(pipe)); - val = I915_READ(CUR_BUF_CFG(pipe)); - skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR], - val); + skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val); + } intel_display_power_put(dev_priv, power_domain); } @@ -3278,30 +3257,28 @@ 
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, struct drm_crtc_state *cstate = &intel_cstate->base; struct drm_atomic_state *state = cstate->state; struct drm_plane *plane; - const struct intel_plane *intel_plane; const struct drm_plane_state *pstate; - unsigned int rate, total_data_rate = 0; - int id; + unsigned int total_data_rate = 0; if (WARN_ON(!state)) return 0; /* Calculate and cache data rate for each plane */ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { - id = skl_wm_plane_id(to_intel_plane(plane)); - intel_plane = to_intel_plane(plane); + enum plane_id plane_id = to_intel_plane(plane)->id; + unsigned int rate; /* packed/uv */ rate = skl_plane_relative_data_rate(intel_cstate, pstate, 0); - plane_data_rate[id] = rate; + plane_data_rate[plane_id] = rate; total_data_rate += rate; /* y-plane */ rate = skl_plane_relative_data_rate(intel_cstate, pstate, 1); - plane_y_data_rate[id] = rate; + plane_y_data_rate[plane_id] = rate; total_data_rate += rate; } @@ -3380,17 +3357,16 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active, struct drm_plane *plane; drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) { - struct intel_plane *intel_plane = to_intel_plane(plane); - int id = skl_wm_plane_id(intel_plane); + enum plane_id plane_id = to_intel_plane(plane)->id; - if (id == PLANE_CURSOR) + if (plane_id == PLANE_CURSOR) continue; if (!pstate->visible) continue; - minimum[id] = skl_ddb_min_alloc(pstate, 0); - y_minimum[id] = skl_ddb_min_alloc(pstate, 1); + minimum[plane_id] = skl_ddb_min_alloc(pstate, 0); + y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1); } minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active); @@ -3410,8 +3386,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, uint16_t minimum[I915_MAX_PLANES] = {}; uint16_t y_minimum[I915_MAX_PLANES] = {}; unsigned int total_data_rate; + enum plane_id plane_id; int num_active; - int id, i; unsigned plane_data_rate[I915_MAX_PLANES] = {}; unsigned plane_y_data_rate[I915_MAX_PLANES] = {}; @@ -3442,9 +3418,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, * proportional to the data rate. 
*/ - for (i = 0; i < I915_MAX_PLANES; i++) { - alloc_size -= minimum[i]; - alloc_size -= y_minimum[i]; + for_each_plane_id_on_crtc(intel_crtc, plane_id) { + alloc_size -= minimum[plane_id]; + alloc_size -= y_minimum[plane_id]; } ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR]; @@ -3463,28 +3439,28 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, return 0; start = alloc->start; - for (id = 0; id < I915_MAX_PLANES; id++) { + for_each_plane_id_on_crtc(intel_crtc, plane_id) { unsigned int data_rate, y_data_rate; uint16_t plane_blocks, y_plane_blocks = 0; - if (id == PLANE_CURSOR) + if (plane_id == PLANE_CURSOR) continue; - data_rate = plane_data_rate[id]; + data_rate = plane_data_rate[plane_id]; /* * allocation for (packed formats) or (uv-plane part of planar format): * promote the expression to 64 bits to avoid overflowing, the * result is < available as data_rate / total_data_rate < 1 */ - plane_blocks = minimum[id]; + plane_blocks = minimum[plane_id]; plane_blocks += div_u64((uint64_t)alloc_size * data_rate, total_data_rate); /* Leave disabled planes at (0,0) */ if (data_rate) { - ddb->plane[pipe][id].start = start; - ddb->plane[pipe][id].end = start + plane_blocks; + ddb->plane[pipe][plane_id].start = start; + ddb->plane[pipe][plane_id].end = start + plane_blocks; } start += plane_blocks; @@ -3492,15 +3468,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, /* * allocation for y_plane part of planar format: */ - y_data_rate = plane_y_data_rate[id]; + y_data_rate = plane_y_data_rate[plane_id]; - y_plane_blocks = y_minimum[id]; + y_plane_blocks = y_minimum[plane_id]; y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, total_data_rate); if (y_data_rate) { - ddb->y_plane[pipe][id].start = start; - ddb->y_plane[pipe][id].end = start + y_plane_blocks; + ddb->y_plane[pipe][plane_id].start = start; + ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks; } start += y_plane_blocks; @@ -3692,12 +3668,12 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if (level) { return 0; } else { + struct drm_plane *plane = pstate->plane; + DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n"); - DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n", - to_intel_crtc(cstate->base.crtc)->pipe, - skl_wm_plane_id(to_intel_plane(pstate->plane)), + DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n", + plane->base.id, plane->name, res_blocks, ddb_allocation, res_lines); - return -EINVAL; } } @@ -3724,7 +3700,6 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv, uint16_t ddb_blocks; enum pipe pipe = intel_crtc->pipe; int ret; - int i = skl_wm_plane_id(intel_plane); if (state) intel_pstate = @@ -3747,7 +3722,7 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv, WARN_ON(!intel_pstate->base.fb); - ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); + ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]); ret = skl_compute_plane_wm(dev_priv, cstate, @@ -3810,7 +3785,7 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate, for_each_intel_plane_mask(&dev_priv->drm, intel_plane, cstate->base.plane_mask) { - wm = &pipe_wm->planes[skl_wm_plane_id(intel_plane)]; + wm = &pipe_wm->planes[intel_plane->id]; for (level = 0; level <= max_level; level++) { ret = skl_compute_wm_level(dev_priv, ddb, cstate, @@ -3854,7 +3829,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv, static void 
skl_write_plane_wm(struct intel_crtc *intel_crtc, const struct skl_plane_wm *wm, const struct skl_ddb_allocation *ddb, - int plane) + enum plane_id plane_id) { struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; @@ -3863,16 +3838,16 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc, enum pipe pipe = intel_crtc->pipe; for (level = 0; level <= max_level; level++) { - skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane, level), + skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), &wm->wm[level]); } - skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane), + skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), &wm->trans_wm); - skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane), - &ddb->plane[pipe][plane]); - skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane), - &ddb->y_plane[pipe][plane]); + skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), + &ddb->plane[pipe][plane_id]); + skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id), + &ddb->y_plane[pipe][plane_id]); } static void skl_write_cursor_wm(struct intel_crtc *intel_crtc, @@ -3977,17 +3952,16 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate) struct drm_plane_state *plane_state; struct drm_plane *plane; enum pipe pipe = intel_crtc->pipe; - int id; WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc)); drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) { - id = skl_wm_plane_id(to_intel_plane(plane)); + enum plane_id plane_id = to_intel_plane(plane)->id; - if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id], - &new_ddb->plane[pipe][id]) && - skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id], - &new_ddb->y_plane[pipe][id])) + if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id], + &new_ddb->plane[pipe][plane_id]) && + skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id], + &new_ddb->y_plane[pipe][plane_id])) continue; plane_state = drm_atomic_get_plane_state(state, plane); @@ -4099,7 +4073,6 @@ skl_print_wm_changes(const struct drm_atomic_state *state) const struct intel_plane *intel_plane; const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb; const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; - int id; int i; for_each_crtc_in_state(state, crtc, cstate, i) { @@ -4107,11 +4080,11 @@ skl_print_wm_changes(const struct drm_atomic_state *state) enum pipe pipe = intel_crtc->pipe; for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { + enum plane_id plane_id = intel_plane->id; const struct skl_ddb_entry *old, *new; - id = skl_wm_plane_id(intel_plane); - old = &old_ddb->plane[pipe][id]; - new = &new_ddb->plane[pipe][id]; + old = &old_ddb->plane[pipe][plane_id]; + new = &new_ddb->plane[pipe][plane_id]; if (skl_ddb_entry_equal(old, new)) continue; @@ -4201,17 +4174,21 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; const struct skl_ddb_allocation *ddb = &state->wm_results.ddb; enum pipe pipe = crtc->pipe; - int plane; + enum plane_id plane_id; if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) return; I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); - for_each_universal_plane(dev_priv, pipe, plane) - skl_write_plane_wm(crtc, &pipe_wm->planes[plane], ddb, plane); - - skl_write_cursor_wm(crtc, &pipe_wm->planes[PLANE_CURSOR], ddb); + for_each_plane_id_on_crtc(crtc, plane_id) { + if (plane_id != PLANE_CURSOR) + skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id], + ddb, plane_id); + else 
+ skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id], + ddb); + } } static void skl_initial_wm(struct intel_atomic_state *state, @@ -4326,32 +4303,29 @@ static inline void skl_wm_level_from_reg_val(uint32_t val, void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, struct skl_pipe_wm *out) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_plane *intel_plane; - struct skl_plane_wm *wm; enum pipe pipe = intel_crtc->pipe; - int level, id, max_level; + int level, max_level; + enum plane_id plane_id; uint32_t val; max_level = ilk_wm_max_level(dev_priv); - for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { - id = skl_wm_plane_id(intel_plane); - wm = &out->planes[id]; + for_each_plane_id_on_crtc(intel_crtc, plane_id) { + struct skl_plane_wm *wm = &out->planes[plane_id]; for (level = 0; level <= max_level; level++) { - if (id != PLANE_CURSOR) - val = I915_READ(PLANE_WM(pipe, id, level)); + if (plane_id != PLANE_CURSOR) + val = I915_READ(PLANE_WM(pipe, plane_id, level)); else val = I915_READ(CUR_WM(pipe, level)); skl_wm_level_from_reg_val(val, &wm->wm[level]); } - if (id != PLANE_CURSOR) - val = I915_READ(PLANE_WM_TRANS(pipe, id)); + if (plane_id != PLANE_CURSOR) + val = I915_READ(PLANE_WM_TRANS(pipe, plane_id)); else val = I915_READ(CUR_WM_TRANS(pipe)); -- cgit v1.2.3 From 8e816bb496763182d5733ef955ac8ca7334480f6 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 22 Nov 2016 18:01:59 +0200 Subject: drm/i915: Use enum plane_id in SKL plane code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the intel_plane->plane and hardcoded 0 usage in the SKL plane code with intel_plane->id. This should make the SKL "primary" and "sprite" code virtually identical, so the next logical step would likely be dropping one of the copies. 
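To see why the two copies become interchangeable: with plane_id in hand, both paths program the same registers keyed only by pipe and plane id, roughly (a sketch distilled from the hunks below):

	enum plane_id plane_id = to_intel_plane(plane)->id;
	enum pipe pipe = to_intel_plane(plane)->pipe;

	I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl);
	I915_WRITE(PLANE_SURF(pipe, plane_id),
		   intel_fb_gtt_offset(fb, rotation) + surf_addr);
	POSTING_READ(PLANE_SURF(pipe, plane_id));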
Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Link: http://patchwork.freedesktop.org/patch/msgid/1479830524-7882-5-git-send-email-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/intel_display.c | 31 +++++++++++++------------- drivers/gpu/drm/i915/intel_sprite.c | 42 ++++++++++++++++++------------------ 2 files changed, 37 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index af836720cccd..30812cd035e6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3378,7 +3378,8 @@ static void skylake_update_primary_plane(struct drm_plane *plane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; - int pipe = intel_crtc->pipe; + enum plane_id plane_id = to_intel_plane(plane)->id; + enum pipe pipe = to_intel_plane(plane)->pipe; u32 plane_ctl; unsigned int rotation = plane_state->base.rotation; u32 stride = skl_plane_stride(fb, 0, rotation); @@ -3413,30 +3414,30 @@ static void skylake_update_primary_plane(struct drm_plane *plane, intel_crtc->adjusted_x = src_x; intel_crtc->adjusted_y = src_y; - I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); - I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x); - I915_WRITE(PLANE_STRIDE(pipe, 0), stride); - I915_WRITE(PLANE_SIZE(pipe, 0), (src_h << 16) | src_w); + I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl); + I915_WRITE(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x); + I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride); + I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); if (scaler_id >= 0) { uint32_t ps_ctrl = 0; WARN_ON(!dst_w || !dst_h); - ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) | + ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) | crtc_state->scaler_state.scalers[scaler_id].mode; I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y); I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h); - I915_WRITE(PLANE_POS(pipe, 0), 0); + I915_WRITE(PLANE_POS(pipe, plane_id), 0); } else { - I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x); + I915_WRITE(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x); } - I915_WRITE(PLANE_SURF(pipe, 0), + I915_WRITE(PLANE_SURF(pipe, plane_id), intel_fb_gtt_offset(fb, rotation) + surf_addr); - POSTING_READ(PLANE_SURF(pipe, 0)); + POSTING_READ(PLANE_SURF(pipe, plane_id)); } static void skylake_disable_primary_plane(struct drm_plane *primary, @@ -3444,12 +3445,12 @@ static void skylake_disable_primary_plane(struct drm_plane *primary, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum plane_id plane_id = to_intel_plane(primary)->id; + enum pipe pipe = to_intel_plane(primary)->pipe; - I915_WRITE(PLANE_CTL(pipe, 0), 0); - I915_WRITE(PLANE_SURF(pipe, 0), 0); - POSTING_READ(PLANE_SURF(pipe, 0)); + I915_WRITE(PLANE_CTL(pipe, plane_id), 0); + I915_WRITE(PLANE_SURF(pipe, plane_id), 0); + POSTING_READ(PLANE_SURF(pipe, plane_id)); } /* Assume fb object is pinned & idle & fenced and just update base pointers */ diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index bc86881b0484..4ddb17cbe45f 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ 
b/drivers/gpu/drm/i915/intel_sprite.c @@ -203,8 +203,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(drm_plane); struct drm_framebuffer *fb = plane_state->base.fb; - const int pipe = intel_plane->pipe; - const int plane = intel_plane->plane + 1; + enum plane_id plane_id = intel_plane->id; + enum pipe pipe = intel_plane->pipe; u32 plane_ctl; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 surf_addr = plane_state->main.offset; @@ -229,9 +229,9 @@ skl_update_plane(struct drm_plane *drm_plane, plane_ctl |= skl_plane_ctl_rotation(rotation); if (key->flags) { - I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value); - I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value); - I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask); + I915_WRITE(PLANE_KEYVAL(pipe, plane_id), key->min_value); + I915_WRITE(PLANE_KEYMAX(pipe, plane_id), key->max_value); + I915_WRITE(PLANE_KEYMSK(pipe, plane_id), key->channel_mask); } if (key->flags & I915_SET_COLORKEY_DESTINATION) @@ -245,36 +245,36 @@ skl_update_plane(struct drm_plane *drm_plane, crtc_w--; crtc_h--; - I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x); - I915_WRITE(PLANE_STRIDE(pipe, plane), stride); - I915_WRITE(PLANE_SIZE(pipe, plane), (src_h << 16) | src_w); + I915_WRITE(PLANE_OFFSET(pipe, plane_id), (y << 16) | x); + I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride); + I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); /* program plane scaler */ if (plane_state->scaler_id >= 0) { int scaler_id = plane_state->scaler_id; const struct intel_scaler *scaler; - DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane, - PS_PLANE_SEL(plane)); + DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", + plane_id, PS_PLANE_SEL(plane_id)); scaler = &crtc_state->scaler_state.scalers[scaler_id]; I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), - PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode); + PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode); I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), ((crtc_w + 1) << 16)|(crtc_h + 1)); - I915_WRITE(PLANE_POS(pipe, plane), 0); + I915_WRITE(PLANE_POS(pipe, plane_id), 0); } else { - I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x); + I915_WRITE(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); } - I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); - I915_WRITE(PLANE_SURF(pipe, plane), + I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl); + I915_WRITE(PLANE_SURF(pipe, plane_id), intel_fb_gtt_offset(fb, rotation) + surf_addr); - POSTING_READ(PLANE_SURF(pipe, plane)); + POSTING_READ(PLANE_SURF(pipe, plane_id)); } static void @@ -283,13 +283,13 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) struct drm_device *dev = dplane->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(dplane); - const int pipe = intel_plane->pipe; - const int plane = intel_plane->plane + 1; + enum plane_id plane_id = intel_plane->id; + enum pipe pipe = intel_plane->pipe; - I915_WRITE(PLANE_CTL(pipe, plane), 0); + I915_WRITE(PLANE_CTL(pipe, plane_id), 0); - I915_WRITE(PLANE_SURF(pipe, plane), 0); - POSTING_READ(PLANE_SURF(pipe, plane)); + I915_WRITE(PLANE_SURF(pipe, plane_id), 0); + POSTING_READ(PLANE_SURF(pipe, plane_id)); } static void -- cgit v1.2.3 From 83c04a62a187283d9e1473d062cd1cd25ee60e2e Mon Sep 17 00:00:00 
2001 From: Ville Syrjälä Date: Tue, 22 Nov 2016 18:02:00 +0200 Subject: drm/i915: Use enum plane_id in VLV/CHV sprite code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use intel_plane->id to derive the VLV/CHV sprite register offsets instead of abusing plane->plane which is really meant for primary planes only. v2: Convert assert_sprites_disabled() over as well v3: Rename the reg macro parameter to 'plane_id' as well (Paulo) Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Link: http://patchwork.freedesktop.org/patch/msgid/1479830524-7882-6-git-send-email-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/i915_reg.h | 58 +++++++++++++++------------- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_sprite.c | 74 ++++++++++++++++++------------------ 3 files changed, 70 insertions(+), 64 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index fcad8fa41274..329848743819 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5711,18 +5711,21 @@ enum { #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) -#define SPCNTR(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR) -#define SPLINOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF) -#define SPSTRIDE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE) -#define SPPOS(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS) -#define SPSIZE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE) -#define SPKEYMINVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL) -#define SPKEYMSK(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK) -#define SPSURF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF) -#define SPKEYMAXVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL) -#define SPTILEOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF) -#define SPCONSTALPHA(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA) -#define SPGAMC(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC) +#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \ + _MMIO_PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b)) + +#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR) +#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF) +#define SPSTRIDE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASTRIDE, _SPBSTRIDE) +#define SPPOS(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAPOS, _SPBPOS) +#define SPSIZE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASIZE, _SPBSIZE) +#define SPKEYMINVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMINVAL, _SPBKEYMINVAL) +#define SPKEYMSK(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMSK, _SPBKEYMSK) +#define SPSURF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURF, _SPBSURF) +#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL) +#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF) +#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA) +#define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) /* * CHV pipe B sprite CSC @@
-5731,29 +5734,32 @@ enum { * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff| * |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff| */ -#define SPCSCYGOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000) -#define SPCSCCBOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000) -#define SPCSCCROFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000) +#define _MMIO_CHV_SPCSC(plane_id, reg) \ + _MMIO(VLV_DISPLAY_BASE + ((plane_id) - PLANE_SPRITE0) * 0x1000 + (reg)) + +#define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900) +#define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904) +#define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908) #define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */ #define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */ -#define SPCSCC01(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000) -#define SPCSCC23(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000) -#define SPCSCC45(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000) -#define SPCSCC67(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000) -#define SPCSCC8(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000) +#define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c) +#define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910) +#define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914) +#define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918) +#define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c) #define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */ #define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */ -#define SPCSCYGICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000) -#define SPCSCCBICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000) -#define SPCSCCRICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000) +#define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920) +#define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924) +#define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928) #define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */ #define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */ -#define SPCSCYGOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000) -#define SPCSCCBOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000) -#define SPCSCCROCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000) +#define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c) +#define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930) +#define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934) #define SPCSC_OMAX(x) ((x) << 16) /* u10 */ #define SPCSC_OMIN(x) ((x) << 0) /* u10 */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 30812cd035e6..f1966fe3dfbe 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1327,7 +1327,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, } } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { for_each_sprite(dev_priv, pipe, sprite) { - u32 val = I915_READ(SPCNTR(pipe, sprite)); + u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite)); I915_STATE_WARN(val & SP_ENABLE, "sprite %c assertion failure, should be off on pipe %c but is still active\n", sprite_name(pipe, sprite), pipe_name(pipe)); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 4ddb17cbe45f..91bcaff28f35 100644 --- 
a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -296,7 +296,7 @@ static void chv_update_csc(struct intel_plane *intel_plane, uint32_t format) { struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); - int plane = intel_plane->plane; + enum plane_id plane_id = intel_plane->id; /* Seems RGB data bypasses the CSC always */ if (!format_is_yuv(format)) @@ -312,23 +312,23 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format) * Cb and Cr apparently come in as signed already, so no * need for any offset. For Y we need to remove the offset. */ - I915_WRITE(SPCSCYGOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(-64)); - I915_WRITE(SPCSCCBOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0)); - I915_WRITE(SPCSCCROFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0)); - - I915_WRITE(SPCSCC01(plane), SPCSC_C1(4769) | SPCSC_C0(6537)); - I915_WRITE(SPCSCC23(plane), SPCSC_C1(-3330) | SPCSC_C0(0)); - I915_WRITE(SPCSCC45(plane), SPCSC_C1(-1605) | SPCSC_C0(4769)); - I915_WRITE(SPCSCC67(plane), SPCSC_C1(4769) | SPCSC_C0(0)); - I915_WRITE(SPCSCC8(plane), SPCSC_C0(8263)); - - I915_WRITE(SPCSCYGICLAMP(plane), SPCSC_IMAX(940) | SPCSC_IMIN(64)); - I915_WRITE(SPCSCCBICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448)); - I915_WRITE(SPCSCCRICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448)); - - I915_WRITE(SPCSCYGOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); - I915_WRITE(SPCSCCBOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); - I915_WRITE(SPCSCCROCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); + I915_WRITE(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64)); + I915_WRITE(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); + I915_WRITE(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); + + I915_WRITE(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537)); + I915_WRITE(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0)); + I915_WRITE(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769)); + I915_WRITE(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0)); + I915_WRITE(SPCSCC8(plane_id), SPCSC_C0(8263)); + + I915_WRITE(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64)); + I915_WRITE(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448)); + I915_WRITE(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448)); + + I915_WRITE(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); + I915_WRITE(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); + I915_WRITE(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); } static void @@ -340,8 +340,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(dplane); struct drm_framebuffer *fb = plane_state->base.fb; - int pipe = intel_plane->pipe; - int plane = intel_plane->plane; + enum pipe pipe = intel_plane->pipe; + enum plane_id plane_id = intel_plane->id; u32 sprctl; u32 sprsurf_offset, linear_offset; unsigned int rotation = plane_state->base.rotation; @@ -434,9 +434,9 @@ vlv_update_plane(struct drm_plane *dplane, linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); if (key->flags) { - I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value); - I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value); - I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask); + I915_WRITE(SPKEYMINVAL(pipe, plane_id), key->min_value); + I915_WRITE(SPKEYMAXVAL(pipe, plane_id), key->max_value); + I915_WRITE(SPKEYMSK(pipe, plane_id), key->channel_mask); } if (key->flags & I915_SET_COLORKEY_SOURCE) @@ -445,21 +445,21 @@ vlv_update_plane(struct 
drm_plane *dplane, if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) chv_update_csc(intel_plane, fb->pixel_format); - I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); - I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); + I915_WRITE(SPSTRIDE(pipe, plane_id), fb->pitches[0]); + I915_WRITE(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x); if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) - I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x); + I915_WRITE(SPTILEOFF(pipe, plane_id), (y << 16) | x); else - I915_WRITE(SPLINOFF(pipe, plane), linear_offset); + I915_WRITE(SPLINOFF(pipe, plane_id), linear_offset); - I915_WRITE(SPCONSTALPHA(pipe, plane), 0); + I915_WRITE(SPCONSTALPHA(pipe, plane_id), 0); - I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); - I915_WRITE(SPCNTR(pipe, plane), sprctl); - I915_WRITE(SPSURF(pipe, plane), + I915_WRITE(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w); + I915_WRITE(SPCNTR(pipe, plane_id), sprctl); + I915_WRITE(SPSURF(pipe, plane_id), intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); - POSTING_READ(SPSURF(pipe, plane)); + POSTING_READ(SPSURF(pipe, plane_id)); } static void @@ -468,13 +468,13 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) struct drm_device *dev = dplane->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(dplane); - int pipe = intel_plane->pipe; - int plane = intel_plane->plane; + enum pipe pipe = intel_plane->pipe; + enum plane_id plane_id = intel_plane->id; - I915_WRITE(SPCNTR(pipe, plane), 0); + I915_WRITE(SPCNTR(pipe, plane_id), 0); - I915_WRITE(SPSURF(pipe, plane), 0); - POSTING_READ(SPSURF(pipe, plane)); + I915_WRITE(SPSURF(pipe, plane_id), 0); + POSTING_READ(SPSURF(pipe, plane_id)); } static void -- cgit v1.2.3 From 49845a231587f1d233a93208c117b5205619f85f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 22 Nov 2016 18:02:01 +0200 Subject: drm/i915: Use enum plane_id in VLV/CHV wm code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's try not to abuse plane->plane for sprites on VLV/CHV and instead use plane->id. Since our watermark structures aren't entirely plane type agnostic (for now) and start indexing sprites from 0, we'll add a small helper to convert between the two bases.
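In other words, the wm structs keep indexing sprites from 0 while the plane ids start at PLANE_SPRITE0, so the helper is a single subtraction and its callers look like this (as added below):

	sprite = vlv_sprite_id(plane->id);	/* == plane->id - PLANE_SPRITE0 */
	wm_state->wm[level].sprite[sprite] = wm;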
Signed-off-by: Ville Syrjälä Reviewed-by: Paulo Zanoni Link: http://patchwork.freedesktop.org/patch/msgid/1479830524-7882-7-git-send-email-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/intel_pm.c | 73 ++++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 37 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 5b7d306a1947..bfd2c289ce49 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -370,12 +370,15 @@ static const int pessimal_latency_ns = 5000; #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \ ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8)) -static int vlv_get_fifo_size(struct drm_i915_private *dev_priv, - enum pipe pipe, int plane) +static int vlv_get_fifo_size(struct intel_plane *plane) { + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); int sprite0_start, sprite1_start, size; - switch (pipe) { + if (plane->id == PLANE_CURSOR) + return 63; + + switch (plane->pipe) { uint32_t dsparb, dsparb2, dsparb3; case PIPE_A: dsparb = I915_READ(DSPARB); @@ -399,24 +402,21 @@ static int vlv_get_fifo_size(struct drm_i915_private *dev_priv, return 0; } - switch (plane) { - case 0: + switch (plane->id) { + case PLANE_PRIMARY: size = sprite0_start; break; - case 1: + case PLANE_SPRITE0: size = sprite1_start - sprite0_start; break; - case 2: + case PLANE_SPRITE1: size = 512 - 1 - sprite1_start; break; default: return 0; } - DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n", - pipe_name(pipe), plane == 0 ? "primary" : "sprite", - plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1), - size); + DRM_DEBUG_KMS("%s FIFO size: %d\n", plane->base.name, size); return size; } @@ -1053,6 +1053,12 @@ static void vlv_compute_fifo(struct intel_crtc *crtc) WARN_ON(fifo_left != 0); } +/* FIXME kill me */ +static inline int vlv_sprite_id(enum plane_id plane_id) +{ + return plane_id - PLANE_SPRITE0; +} + static void vlv_invert_wms(struct intel_crtc *crtc) { struct vlv_wm_state *wm_state = &crtc->wm_state; @@ -1079,7 +1085,7 @@ static void vlv_invert_wms(struct intel_crtc *crtc) wm_state->wm[level].primary; break; case DRM_PLANE_TYPE_OVERLAY: - sprite = plane->plane; + sprite = vlv_sprite_id(plane->id); wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size - wm_state->wm[level].sprite[sprite]; break; @@ -1144,7 +1150,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc) wm_state->wm[level].primary = wm; break; case DRM_PLANE_TYPE_OVERLAY: - sprite = plane->plane; + sprite = vlv_sprite_id(plane->id); wm_state->wm[level].sprite[sprite] = wm; break; } @@ -1170,7 +1176,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc) wm_state->wm[level].primary); break; case DRM_PLANE_TYPE_OVERLAY: - sprite = plane->plane; + sprite = vlv_sprite_id(plane->id); for (level = 0; level < wm_state->num_levels; level++) wm_state->sr[level].plane = min(wm_state->sr[level].plane, @@ -1199,17 +1205,23 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc) int sprite0_start = 0, sprite1_start = 0, fifo_size = 0; for_each_intel_plane_on_crtc(dev, crtc, plane) { - if (plane->base.type == DRM_PLANE_TYPE_CURSOR) { - WARN_ON(plane->wm.fifo_size != 63); - continue; - } - - if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) + switch (plane->id) { + case PLANE_PRIMARY: sprite0_start = plane->wm.fifo_size; - else if (plane->plane == 0) + break; + case PLANE_SPRITE0: sprite1_start = sprite0_start + plane->wm.fifo_size; - else + break; + case 
PLANE_SPRITE1: fifo_size = sprite1_start + plane->wm.fifo_size; + break; + case PLANE_CURSOR: + WARN_ON(plane->wm.fifo_size != 63); + break; + default: + MISSING_CASE(plane->id); + break; + } } WARN_ON(fifo_size != 512 - 1); @@ -4510,21 +4522,8 @@ void vlv_wm_get_hw_state(struct drm_device *dev) vlv_read_wm_values(dev_priv, wm); - for_each_intel_plane(dev, plane) { - switch (plane->base.type) { - int sprite; - case DRM_PLANE_TYPE_CURSOR: - plane->wm.fifo_size = 63; - break; - case DRM_PLANE_TYPE_PRIMARY: - plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, 0); - break; - case DRM_PLANE_TYPE_OVERLAY: - sprite = plane->plane; - plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, sprite + 1); - break; - } - } + for_each_intel_plane(dev, plane) + plane->wm.fifo_size = vlv_get_fifo_size(plane); wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; wm->level = VLV_WM_LEVEL_PM2; -- cgit v1.2.3 From 812b1d2fe527c3c68d04f379aef850dd02db5945 Mon Sep 17 00:00:00 2001 From: Bob Paauwe Date: Mon, 21 Nov 2016 14:24:06 -0800 Subject: drm/i915/bxt: Correct dual-link MIPI port control. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For BXT, there is only one bit that enables/disables dual-link mode and not different bits depending on which pipe is being used. Signed-off-by: Bob Paauwe Link: http://patchwork.freedesktop.org/patch/msgid/1479767046-3964-1-git-send-email-bob.j.paauwe@intel.com Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/intel_dsi.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 4e0d025490a3..a35c1412e43b 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -476,7 +476,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder) if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) { temp |= (intel_dsi->dual_link - 1) << DUAL_LINK_MODE_SHIFT; - temp |= intel_crtc->pipe ? + if (IS_BROXTON(dev_priv)) + temp |= LANE_CONFIGURATION_DUAL_LINK_A; + else + temp |= intel_crtc->pipe ? LANE_CONFIGURATION_DUAL_LINK_B : LANE_CONFIGURATION_DUAL_LINK_A; } -- cgit v1.2.3 From e0ca7a6be38ce603d26df5707c22e53870a623e0 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 14 Nov 2016 18:35:09 +0200 Subject: drm/i915: Fix cdclk vs. dev_cdclk mess when not recomputing things MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When we end up not recomputing the cdclk, we need to populate intel_state->cdclk with the "atomic_cdclk_freq" instead of the current cdclk_freq. When no pipes are active, the actual cdclk_freq may be lower than what the configuration of the planes and pipes would require from the point of view of the software state. This fixes bogus WARNS from skl_max_scale() which is trying to check the plane software state against the cdclk frequency. So any time it got called during DPMS off for instance, we might have tripped the warn if the current mode would have required a higher than minimum cdclk. 
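Concretely, the problematic path was the one in intel_atomic_check() that skips recomputation; the fix (visible in the diff) is to fall back to the software-tracked value rather than the current hardware frequency:

	} else {
		intel_state->cdclk = dev_priv->atomic_cdclk_freq;
	}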
v2: Drop the dev_cdclk stuff (Maarten) Cc: Maarten Lankhorst Cc: Mika Kahola Cc: bruno.pagani@ens-lyon.org Cc: Daniel J Blueman Cc: Paul Bolle Cc: Joseph Yasi Tested-by: Paul Bolle Tested-by: Joseph Yasi (v1) Cc: stable@vger.kernel.org Fixes: 1a617b77658e ("drm/i915: Keep track of the cdclk as if all crtc's were active.") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=98214 Signed-off-by: Ville Syrjälä Reviewed-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/1479141311-11904-2-git-send-email-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/intel_display.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f1966fe3dfbe..383775e0658a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13995,8 +13995,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state) DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n", intel_state->cdclk, intel_state->dev_cdclk); - } else + } else { to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq; + } intel_modeset_clear_plls(state); @@ -14097,8 +14098,9 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) return ret; - } else - intel_state->cdclk = dev_priv->cdclk_freq; + } else { + intel_state->cdclk = dev_priv->atomic_cdclk_freq; + } ret = drm_atomic_helper_check_planes(dev, state); if (ret) -- cgit v1.2.3 From 8d96561a0a31974fec33bd3f3853d6211b7ab738 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 14 Nov 2016 18:35:10 +0200 Subject: drm/i915: Protect dev_priv->atomic_cdclk_freq with all the crtc locks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A modeset on one pipe can update dev_priv->atomic_cdclk_freq without actually touching the hardware, in which case we won't force a modeset on all the pipes, and thus won't lock any of the other pipes either. That means a parallel plane update on another pipe could be looking at a stale dev_priv->atomic_cdclk_freq and thus fail to notice when the plane configuration is invalid, or potentially reject a valid update. To overcome this we must protect writes to atomic_cdclk_freq with all the crtc locks, and thus for reads any single crtc lock will be sufficient protection. Cc: Maarten Lankhorst Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1479141311-11904-3-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 9 +++++++- drivers/gpu/drm/i915/intel_display.c | 41 +++++++++++++++++++++++++++++++----- 2 files changed, 44 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 239d88cf637c..e3f1a4b1b269 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2016,7 +2016,14 @@ struct drm_i915_private { unsigned int fsb_freq, mem_freq, is_ddr3; unsigned int skl_preferred_vco_freq; - unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; + unsigned int cdclk_freq, max_cdclk_freq; + + /* + * For reading holding any crtc lock is sufficient, + * for writing must hold all of them.
+ */ + unsigned int atomic_cdclk_freq; + unsigned int max_dotclk_freq; unsigned int rawclk_freq; unsigned int hpll_freq; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 383775e0658a..8290ca962117 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13914,13 +13914,32 @@ static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state) return 0; } +static int intel_lock_all_pipes(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + + /* Add all pipes to the state */ + for_each_crtc(state->dev, crtc) { + struct drm_crtc_state *crtc_state; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } + + return 0; +} + static int intel_modeset_all_pipes(struct drm_atomic_state *state) { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; int ret = 0; - /* add all active pipes to the state */ + /* + * Add all pipes to the state, and force + * a modeset on all the active ones. + */ for_each_crtc(state->dev, crtc) { crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) @@ -13986,12 +14005,24 @@ static int intel_modeset_checks(struct drm_atomic_state *state) if (ret < 0) return ret; + /* + * Writes to dev_priv->atomic_cdclk_freq must protected by + * holding all the crtc locks, even if we don't end up + * touching the hardware + */ + if (intel_state->cdclk != dev_priv->atomic_cdclk_freq) { + ret = intel_lock_all_pipes(state); + if (ret < 0) + return ret; + } + + /* All pipes must be switched off while we change the cdclk. */ if (intel_state->dev_cdclk != dev_priv->cdclk_freq || - intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) + intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) { ret = intel_modeset_all_pipes(state); - - if (ret < 0) - return ret; + if (ret < 0) + return ret; + } DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n", intel_state->cdclk, intel_state->dev_cdclk); -- cgit v1.2.3 From 9780aad59cf40db6a2ded69f91b11ba366dbd8cf Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 14 Nov 2016 18:35:11 +0200 Subject: drm/i915: Simplify error handling in intel_modeset_all_pipes() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need for the extra break statements and whatnot, just return the error directly. And tighten the scope of the local variables while at it. Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1479141311-11904-4-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_display.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8290ca962117..d51d885b2584 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13933,14 +13933,15 @@ static int intel_lock_all_pipes(struct drm_atomic_state *state) static int intel_modeset_all_pipes(struct drm_atomic_state *state) { struct drm_crtc *crtc; - struct drm_crtc_state *crtc_state; - int ret = 0; /* * Add all pipes to the state, and force * a modeset on all the active ones. 
*/ for_each_crtc(state->dev, crtc) { + struct drm_crtc_state *crtc_state; + int ret; + crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); @@ -13952,14 +13953,14 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state) ret = drm_atomic_add_affected_connectors(state, crtc); if (ret) - break; + return ret; ret = drm_atomic_add_affected_planes(state, crtc); if (ret) - break; + return ret; } - return ret; + return 0; } static int intel_modeset_checks(struct drm_atomic_state *state) -- cgit v1.2.3 From ac2402882f122307e01faee7d948e90a363ed575 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 23 Nov 2016 15:57:00 +0100 Subject: drm/i915: Remove all ->config dereferences from intel_hdmi, v2. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In all cases we can now obtain the relevant crtc_state/conn_state from the relevant callbacks, which means all the ->config accesses can be removed and the code cleaned up. Changes since v1: - cstate -> crtc_state Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/8b02a6b4-606a-e43a-b357-ad17f491525b@linux.intel.com [mlankhorst: Reinstate missing comment] Reviewed-by: Ville Syrjälä --- drivers/gpu/drm/i915/intel_ddi.c | 9 +- drivers/gpu/drm/i915/intel_drv.h | 4 +- drivers/gpu/drm/i915/intel_hdmi.c | 170 +++++++++++++++++++------------------- 3 files changed, 93 insertions(+), 90 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 10ec9d4b7d45..0d680dabac77 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1701,7 +1701,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, bool has_hdmi_sink, - struct drm_display_mode *adjusted_mode, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, struct intel_shared_dpll *pll) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); @@ -1721,7 +1722,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, intel_hdmi->set_infoframes(drm_encoder, has_hdmi_sink, - adjusted_mode); + crtc_state, conn_state); } static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder, @@ -1742,8 +1743,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder, } if (type == INTEL_OUTPUT_HDMI) { intel_ddi_pre_enable_hdmi(intel_encoder, - crtc->config->has_hdmi_sink, - &crtc->config->base.adjusted_mode, + pipe_config->has_hdmi_sink, + pipe_config, conn_state, crtc->config->shared_dpll); } } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index beafff253615..9bbe5c53a272 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -842,11 +842,13 @@ struct intel_hdmi { enum hdmi_picture_aspect aspect_ratio; struct intel_connector *attached_connector; void (*write_infoframe)(struct drm_encoder *encoder, + const struct intel_crtc_state *crtc_state, enum hdmi_infoframe_type type, const void *frame, ssize_t len); void (*set_infoframes)(struct drm_encoder *encoder, bool enable, - const struct drm_display_mode *adjusted_mode); + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); bool (*infoframe_enabled)(struct drm_encoder *encoder, const struct intel_crtc_state *pipe_config); }; diff --git 
a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index fb88e32e25a3..a12278740f95 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -133,6 +133,7 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, } static void g4x_write_infoframe(struct drm_encoder *encoder, + const struct intel_crtc_state *crtc_state, enum hdmi_infoframe_type type, const void *frame, ssize_t len) { @@ -187,13 +188,14 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder, } static void ibx_write_infoframe(struct drm_encoder *encoder, + const struct intel_crtc_state *crtc_state, enum hdmi_infoframe_type type, const void *frame, ssize_t len) { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); int i; @@ -246,13 +248,14 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder, } static void cpt_write_infoframe(struct drm_encoder *encoder, + const struct intel_crtc_state *crtc_state, enum hdmi_infoframe_type type, const void *frame, ssize_t len) { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); int i; @@ -303,13 +306,14 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder, } static void vlv_write_infoframe(struct drm_encoder *encoder, + const struct intel_crtc_state *crtc_state, enum hdmi_infoframe_type type, const void *frame, ssize_t len) { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); int i; @@ -361,14 +365,14 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder, } static void hsw_write_infoframe(struct drm_encoder *encoder, + const struct intel_crtc_state *crtc_state, enum hdmi_infoframe_type type, const void *frame, ssize_t len) { const uint32_t *data = frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); i915_reg_t data_reg; int i; @@ -425,6 +429,7 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder, * bytes by one. 
*/ static void intel_write_infoframe(struct drm_encoder *encoder, + const struct intel_crtc_state *crtc_state, union hdmi_infoframe *frame) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); @@ -443,26 +448,25 @@ static void intel_write_infoframe(struct drm_encoder *encoder, buffer[3] = 0; len++; - intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len); + intel_hdmi->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len); } static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, - const struct drm_display_mode *adjusted_mode) + const struct intel_crtc_state *crtc_state) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); union hdmi_infoframe frame; int ret; ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, - adjusted_mode); + &crtc_state->base.adjusted_mode); if (ret < 0) { DRM_ERROR("couldn't fill AVI infoframe\n"); return; } if (intel_hdmi->rgb_quant_range_selectable) { - if (intel_crtc->config->limited_color_range) + if (crtc_state->limited_color_range) frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED; else @@ -470,10 +474,11 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, HDMI_QUANTIZATION_RANGE_FULL; } - intel_write_infoframe(encoder, &frame); + intel_write_infoframe(encoder, crtc_state, &frame); } -static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) +static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder, + const struct intel_crtc_state *crtc_state) { union hdmi_infoframe frame; int ret; @@ -486,27 +491,28 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) frame.spd.sdi = HDMI_SPD_SDI_PC; - intel_write_infoframe(encoder, &frame); + intel_write_infoframe(encoder, crtc_state, &frame); } static void intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, - const struct drm_display_mode *adjusted_mode) + const struct intel_crtc_state *crtc_state) { union hdmi_infoframe frame; int ret; ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi, - adjusted_mode); + &crtc_state->base.adjusted_mode); if (ret < 0) return; - intel_write_infoframe(encoder, &frame); + intel_write_infoframe(encoder, crtc_state, &frame); } static void g4x_set_infoframes(struct drm_encoder *encoder, bool enable, - const struct drm_display_mode *adjusted_mode) + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); @@ -560,28 +566,22 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); - intel_hdmi_set_spd_infoframe(encoder); - intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_avi_infoframe(encoder, crtc_state); + intel_hdmi_set_spd_infoframe(encoder, crtc_state); + intel_hdmi_set_hdmi_infoframe(encoder, crtc_state); } -static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder) +static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state) { - struct drm_device *dev = encoder->dev; - struct drm_connector *connector; - - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + struct drm_connector *connector = conn_state->connector; /* * HDMI cloning is only supported on g4x which doesn't * support deep color or GCP infoframes anyway so no * need to 
worry about multiple HDMI sinks here. */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - if (connector->encoder == encoder) - return connector->display_info.bpc > 8; - return false; + return connector->display_info.bpc > 8; } /* @@ -627,15 +627,17 @@ static bool gcp_default_phase_possible(int pipe_bpp, mode->crtc_htotal/2 % pixels_per_group == 0); } -static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) +static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->dev); - struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); i915_reg_t reg; u32 val = 0; if (HAS_DDI(dev_priv)) - reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder); + reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); else if (HAS_PCH_SPLIT(dev_priv)) @@ -644,12 +646,12 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) return false; /* Indicate color depth whenever the sink supports deep color */ - if (hdmi_sink_is_deep_color(encoder)) + if (hdmi_sink_is_deep_color(conn_state)) val |= GCP_COLOR_INDICATION; /* Enable default_phase whenever the display mode is suitably aligned */ - if (gcp_default_phase_possible(crtc->config->pipe_bpp, - &crtc->config->base.adjusted_mode)) + if (gcp_default_phase_possible(crtc_state->pipe_bpp, + &crtc_state->base.adjusted_mode)) val |= GCP_DEFAULT_PHASE_ENABLE; I915_WRITE(reg, val); @@ -659,10 +661,11 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) static void ibx_set_infoframes(struct drm_encoder *encoder, bool enable, - const struct drm_display_mode *adjusted_mode) + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); @@ -698,23 +701,24 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); - if (intel_hdmi_set_gcp_infoframe(encoder)) + if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP; I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); - intel_hdmi_set_spd_infoframe(encoder); - intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_avi_infoframe(encoder, crtc_state); + intel_hdmi_set_spd_infoframe(encoder, crtc_state); + intel_hdmi_set_hdmi_infoframe(encoder, crtc_state); } static void cpt_set_infoframes(struct drm_encoder *encoder, bool enable, - const struct drm_display_mode *adjusted_mode) + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = 
TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -740,24 +744,25 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); - if (intel_hdmi_set_gcp_infoframe(encoder)) + if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP; I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); - intel_hdmi_set_spd_infoframe(encoder); - intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_avi_infoframe(encoder, crtc_state); + intel_hdmi_set_spd_infoframe(encoder, crtc_state); + intel_hdmi_set_hdmi_infoframe(encoder, crtc_state); } static void vlv_set_infoframes(struct drm_encoder *encoder, bool enable, - const struct drm_display_mode *adjusted_mode) + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -792,25 +797,25 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); - if (intel_hdmi_set_gcp_infoframe(encoder)) + if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP; I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); - intel_hdmi_set_spd_infoframe(encoder); - intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_avi_infoframe(encoder, crtc_state); + intel_hdmi_set_spd_infoframe(encoder, crtc_state); + intel_hdmi_set_hdmi_infoframe(encoder, crtc_state); } static void hsw_set_infoframes(struct drm_encoder *encoder, bool enable, - const struct drm_display_mode *adjusted_mode) + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); + i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); u32 val = I915_READ(reg); assert_hdmi_port_disabled(intel_hdmi); @@ -825,15 +830,15 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, return; } - if (intel_hdmi_set_gcp_infoframe(encoder)) + if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP_HSW; I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); - intel_hdmi_set_spd_infoframe(encoder); - intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); + intel_hdmi_set_avi_infoframe(encoder, crtc_state); + intel_hdmi_set_spd_infoframe(encoder, crtc_state); + intel_hdmi_set_hdmi_infoframe(encoder, crtc_state); } void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) @@ -852,31 +857,32 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) adapter, enable); } -static void intel_hdmi_prepare(struct intel_encoder *encoder) +static void intel_hdmi_prepare(struct 
intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; u32 hdmi_val; intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); hdmi_val = SDVO_ENCODING_HDMI; - if (!HAS_PCH_SPLIT(dev_priv) && crtc->config->limited_color_range) + if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range) hdmi_val |= HDMI_COLOR_RANGE_16_235; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH; - if (crtc->config->pipe_bpp > 24) + if (crtc_state->pipe_bpp > 24) hdmi_val |= HDMI_COLOR_FORMAT_12bpc; else hdmi_val |= SDVO_COLOR_FORMAT_8bpc; - if (crtc->config->has_hdmi_sink) + if (crtc_state->has_hdmi_sink) hdmi_val |= HDMI_MODE_SELECT_HDMI; if (HAS_PCH_CPT(dev_priv)) @@ -979,9 +985,9 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); - WARN_ON(!crtc->config->has_hdmi_sink); + WARN_ON(!pipe_config->has_hdmi_sink); DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", pipe_name(crtc->pipe)); intel_audio_codec_enable(encoder, pipe_config, conn_state); @@ -1015,14 +1021,13 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); u32 temp; temp = I915_READ(intel_hdmi->hdmi_reg); temp |= SDVO_ENABLE; - if (crtc->config->has_audio) + if (pipe_config->has_audio) temp |= SDVO_AUDIO_ENABLE; /* @@ -1066,7 +1071,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); enum pipe pipe = crtc->pipe; u32 temp; @@ -1128,7 +1133,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); u32 temp; temp = I915_READ(intel_hdmi->hdmi_reg); @@ -1170,7 +1175,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } - intel_hdmi->set_infoframes(&encoder->base, false, NULL); + intel_hdmi->set_infoframes(&encoder->base, false, old_crtc_state, old_conn_state); intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); } @@ -1642,13 +1647,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { 
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; - intel_hdmi_prepare(encoder); + intel_hdmi_prepare(encoder, pipe_config); intel_hdmi->set_infoframes(&encoder->base, pipe_config->has_hdmi_sink, - adjusted_mode); + pipe_config, conn_state); } static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, @@ -1659,7 +1663,6 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, struct intel_hdmi *intel_hdmi = &dport->hdmi; struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; vlv_phy_pre_encoder_enable(encoder); @@ -1669,7 +1672,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, intel_hdmi->set_infoframes(&encoder->base, pipe_config->has_hdmi_sink, - adjusted_mode); + pipe_config, conn_state); g4x_enable_hdmi(encoder, pipe_config, conn_state); @@ -1680,7 +1683,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - intel_hdmi_prepare(encoder); + intel_hdmi_prepare(encoder, pipe_config); vlv_phy_pre_pll_enable(encoder); } @@ -1689,7 +1692,7 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - intel_hdmi_prepare(encoder); + intel_hdmi_prepare(encoder, pipe_config); chv_phy_pre_pll_enable(encoder); } @@ -1732,9 +1735,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder, struct intel_hdmi *intel_hdmi = &dport->hdmi; struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; chv_phy_pre_encoder_enable(encoder); @@ -1743,8 +1743,8 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder, chv_set_phy_signal_level(encoder, 128, 102, false); intel_hdmi->set_infoframes(&encoder->base, - intel_crtc->config->has_hdmi_sink, - adjusted_mode); + pipe_config->has_hdmi_sink, + pipe_config, conn_state); g4x_enable_hdmi(encoder, pipe_config, conn_state); -- cgit v1.2.3 From 30576a2c462d9658508c3de67601aa565f973064 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 24 Nov 2016 09:34:01 +0000 Subject: drm/i915/debugfs: Drop i915_hws_info i915_hws_info() has not been kept up to date (missing new engines) and so I consider it to be unused. HWS is included in the error state, which would be an avenue to retrieving it if required in future (possibly via i915_engine_info). As it is currently oopsing with an rpm testcase, just remove it.
Fixes: 3b3f1650b1ca ("drm/i915: Allocate intel_engine_cs structure only for the enabled engines") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=98838 Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161124093401.18852-1-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_debugfs.c | 25 ------------------------- 1 file changed, 25 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 437212a95b19..86825d030b87 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -935,27 +935,6 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) return 0; } -static int i915_hws_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = m->private; - struct drm_i915_private *dev_priv = node_to_i915(node); - struct intel_engine_cs *engine; - const u32 *hws; - int i; - - engine = dev_priv->engine[(uintptr_t)node->info_ent->data]; - hws = engine->status_page.page_addr; - if (hws == NULL) - return 0; - - for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { - seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", - i * 4, - hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); - } - return 0; -} - #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) static ssize_t @@ -5408,10 +5387,6 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_gem_seqno", i915_gem_seqno_info, 0}, {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, - {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, - {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, - {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, - {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0}, {"i915_guc_info", i915_guc_info, 0}, {"i915_guc_load_status", i915_guc_load_status_info, 0}, -- cgit v1.2.3 From e3f51ece02f877d2ce9619aa8d5919db56f39582 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 14 Nov 2016 22:39:34 +0000 Subject: drm/i915: cleanup use of INSTR_CLIENT_MASK Doing cmd_header >> 29 to extract our 3-bit client value where we know cmd_header is a u32 shouldn't then also require the use of a mask. So remove the redundant operation and get rid of INSTR_CLIENT_MASK now that there are no longer any users. 
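For illustration only, a stand-alone sketch (not part of the patch) of why the mask is redundant: for an unsigned 32-bit header, shifting right by 29 already discards everything except the top three client bits, so masking with 0xE0000000 first cannot change the result.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t cmd_header = 0x7A123456;	/* arbitrary 32-bit command header */

	uint32_t with_mask = (cmd_header & 0xE0000000u) >> 29;	/* old form */
	uint32_t shift_only = cmd_header >> 29;			/* new form */

	assert(with_mask == shift_only);	/* holds for every 32-bit value */
	return 0;
}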
Cc: Chris Wilson Signed-off-by: Matthew Auld Reviewed-by: Chris Wilson Signed-off-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/1479163174-29686-1-git-send-email-matthew.auld@intel.com --- drivers/gpu/drm/i915/i915_cmd_parser.c | 6 +++--- drivers/gpu/drm/i915/i915_reg.h | 1 - 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index f5762cde288b..be74c0f8ab34 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -558,7 +558,7 @@ static const struct drm_i915_reg_table hsw_blt_reg_tables[] = { static u32 gen7_render_get_cmd_length_mask(u32 cmd_header) { - u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT; + u32 client = cmd_header >> INSTR_CLIENT_SHIFT; u32 subclient = (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT; @@ -577,7 +577,7 @@ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header) static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header) { - u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT; + u32 client = cmd_header >> INSTR_CLIENT_SHIFT; u32 subclient = (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT; u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT; @@ -600,7 +600,7 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header) static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header) { - u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT; + u32 client = cmd_header >> INSTR_CLIENT_SHIFT; if (client == INSTR_MI_CLIENT) return 0x3F; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 329848743819..6747d6864aaf 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -294,7 +294,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) * Instruction field definitions used by the command parser */ #define INSTR_CLIENT_SHIFT 29 -#define INSTR_CLIENT_MASK 0xE0000000 #define INSTR_MI_CLIENT 0x0 #define INSTR_BC_CLIENT 0x2 #define INSTR_RC_CLIENT 0x3 -- cgit v1.2.3 From 007873b30b9001348c0dfc96deb9db220a3be116 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Wed, 23 Nov 2016 23:02:27 +0000 Subject: drm/i915: kick out cmd_parser specific structs from i915_drv.h No sense in keeping the cmd_descriptor and cmd_table structs in i915_drv.h, now that they are no longer referenced externally. Cc: Chris Wilson Signed-off-by: Matthew Auld Reviewed-by: Joonas Lahtinen Signed-off-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/1479942147-9837-1-git-send-email-matthew.auld@intel.com --- drivers/gpu/drm/i915/i915_cmd_parser.c | 96 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 96 ---------------------------------- 2 files changed, 96 insertions(+), 96 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index be74c0f8ab34..06319dcd249d 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -86,6 +86,102 @@ * general bitmasking mechanism. */ +/* + * A command that requires special handling by the command parser. + */ +struct drm_i915_cmd_descriptor { + /* + * Flags describing how the command parser processes the command. 
+ * + * CMD_DESC_FIXED: The command has a fixed length if this is set, + * a length mask if not set + * CMD_DESC_SKIP: The command is allowed but does not follow the + * standard length encoding for the opcode range in + * which it falls + * CMD_DESC_REJECT: The command is never allowed + * CMD_DESC_REGISTER: The command should be checked against the + * register whitelist for the appropriate ring + * CMD_DESC_MASTER: The command is allowed if the submitting process + * is the DRM master + */ + u32 flags; +#define CMD_DESC_FIXED (1<<0) +#define CMD_DESC_SKIP (1<<1) +#define CMD_DESC_REJECT (1<<2) +#define CMD_DESC_REGISTER (1<<3) +#define CMD_DESC_BITMASK (1<<4) +#define CMD_DESC_MASTER (1<<5) + + /* + * The command's unique identification bits and the bitmask to get them. + * This isn't strictly the opcode field as defined in the spec and may + * also include type, subtype, and/or subop fields. + */ + struct { + u32 value; + u32 mask; + } cmd; + + /* + * The command's length. The command is either fixed length (i.e. does + * not include a length field) or has a length field mask. The flag + * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has + * a length mask. All command entries in a command table must include + * length information. + */ + union { + u32 fixed; + u32 mask; + } length; + + /* + * Describes where to find a register address in the command to check + * against the ring's register whitelist. Only valid if flags has the + * CMD_DESC_REGISTER bit set. + * + * A non-zero step value implies that the command may access multiple + * registers in sequence (e.g. LRI), in that case step gives the + * distance in dwords between individual offset fields. + */ + struct { + u32 offset; + u32 mask; + u32 step; + } reg; + +#define MAX_CMD_DESC_BITMASKS 3 + /* + * Describes command checks where a particular dword is masked and + * compared against an expected value. If the command does not match + * the expected value, the parser rejects it. Only valid if flags has + * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero + * are valid. + * + * If the check specifies a non-zero condition_mask then the parser + * only performs the check when the bits specified by condition_mask + * are non-zero. + */ + struct { + u32 offset; + u32 mask; + u32 expected; + u32 condition_offset; + u32 condition_mask; + } bits[MAX_CMD_DESC_BITMASKS]; +}; + +/* + * A table of commands requiring special handling by the command parser. + * + * Each engine has an array of tables. Each table consists of an array of + * command descriptors, which must be sorted with command opcodes in + * ascending order. + */ +struct drm_i915_cmd_table { + const struct drm_i915_cmd_descriptor *table; + int count; +}; + #define STD_MI_OPCODE_SHIFT (32 - 9) #define STD_3D_OPCODE_SHIFT (32 - 16) #define STD_2D_OPCODE_SHIFT (32 - 10) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e3f1a4b1b269..01f506728205 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2454,102 +2454,6 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg) (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) -/* - * A command that requires special handling by the command parser. - */ -struct drm_i915_cmd_descriptor { - /* - * Flags describing how the command parser processes the command. 
- * - * CMD_DESC_FIXED: The command has a fixed length if this is set, - * a length mask if not set - * CMD_DESC_SKIP: The command is allowed but does not follow the - * standard length encoding for the opcode range in - * which it falls - * CMD_DESC_REJECT: The command is never allowed - * CMD_DESC_REGISTER: The command should be checked against the - * register whitelist for the appropriate ring - * CMD_DESC_MASTER: The command is allowed if the submitting process - * is the DRM master - */ - u32 flags; -#define CMD_DESC_FIXED (1<<0) -#define CMD_DESC_SKIP (1<<1) -#define CMD_DESC_REJECT (1<<2) -#define CMD_DESC_REGISTER (1<<3) -#define CMD_DESC_BITMASK (1<<4) -#define CMD_DESC_MASTER (1<<5) - - /* - * The command's unique identification bits and the bitmask to get them. - * This isn't strictly the opcode field as defined in the spec and may - * also include type, subtype, and/or subop fields. - */ - struct { - u32 value; - u32 mask; - } cmd; - - /* - * The command's length. The command is either fixed length (i.e. does - * not include a length field) or has a length field mask. The flag - * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has - * a length mask. All command entries in a command table must include - * length information. - */ - union { - u32 fixed; - u32 mask; - } length; - - /* - * Describes where to find a register address in the command to check - * against the ring's register whitelist. Only valid if flags has the - * CMD_DESC_REGISTER bit set. - * - * A non-zero step value implies that the command may access multiple - * registers in sequence (e.g. LRI), in that case step gives the - * distance in dwords between individual offset fields. - */ - struct { - u32 offset; - u32 mask; - u32 step; - } reg; - -#define MAX_CMD_DESC_BITMASKS 3 - /* - * Describes command checks where a particular dword is masked and - * compared against an expected value. If the command does not match - * the expected value, the parser rejects it. Only valid if flags has - * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero - * are valid. - * - * If the check specifies a non-zero condition_mask then the parser - * only performs the check when the bits specified by condition_mask - * are non-zero. - */ - struct { - u32 offset; - u32 mask; - u32 expected; - u32 condition_offset; - u32 condition_mask; - } bits[MAX_CMD_DESC_BITMASKS]; -}; - -/* - * A table of commands requiring special handling by the command parser. - * - * Each engine has an array of tables. Each table consists of an array of - * command descriptors, which must be sorted with command opcodes in - * ascending order. - */ -struct drm_i915_cmd_table { - const struct drm_i915_cmd_descriptor *table; - int count; -}; - static inline const struct intel_device_info * intel_info(const struct drm_i915_private *dev_priv) { -- cgit v1.2.3 From 9607ae79710afb453173b90d5bf564788a6e09b1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 24 Nov 2016 09:47:52 +0000 Subject: drm/i915/debugfs: Increment return value of gt.next_seqno The i915_next_seqno read value is to be the next seqno used by the kernel. However, in the conversion to atomics ops for gt.next_seqno, in commit 28176ef4cfa5 ("drm/i915: Reserve space in the global seqno during request allocation"), this was changed from a post-increment to a pre-increment. This increment was missed from the value reported by debugfs, so in effect it was reporting the current seqno (last assigned), not the next seqno. 
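As a stand-alone illustration of the pre-increment semantics described above (plain C11 atomics, not the driver's code): the counter holds the seqno most recently handed out, so the value the allocator will hand out next is one greater, which is what the debugfs read should report.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seqno;	/* last seqno handed out; 0 = none yet */

static unsigned int assign_seqno(void)
{
	/* pre-increment allocator: returns the freshly assigned seqno */
	return atomic_fetch_add(&seqno, 1) + 1;
}

static unsigned int next_seqno(void)
{
	/* what the next call to assign_seqno() will return */
	return atomic_load(&seqno) + 1;
}

int main(void)
{
	printf("next %u\n", next_seqno());	/* 1 */
	assign_seqno();
	printf("next %u\n", next_seqno());	/* 2 */
	return 0;
}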
Fixes: 28176ef4cfa5 ("drm/i915: Reserve space in the global seqno during request allocation") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=81209 Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20161124094752.19129-1-chris@chris-wilson.co.uk Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 86825d030b87..0c6323e0f9f9 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1026,7 +1026,7 @@ i915_next_seqno_get(void *data, u64 *val) { struct drm_i915_private *dev_priv = data; - *val = atomic_read(&dev_priv->gt.global_timeline.next_seqno); + *val = 1 + atomic_read(&dev_priv->gt.global_timeline.next_seqno); return 0; } -- cgit v1.2.3 From 41736a8e3331a67445b271e73be39536c498659e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 24 Nov 2016 12:58:51 +0000 Subject: drm/i915: Use the precomputed value for whether to enable command parsing As i915.enable_cmd_parser is an unsafe option, make it read-only at runtime. Now that it is constant, we can use the value determined during initialisation as to whether we need the cmdparser at execbuffer time. v2: Remove the inline for its single user, it is clear enough (and shorter) without! Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20161124125851.6615-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_cmd_parser.c | 23 +---------------------- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/i915_params.c | 6 +++--- drivers/gpu/drm/i915/i915_params.h | 2 +- 5 files changed, 6 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 06319dcd249d..6c310a8a97a7 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1131,27 +1131,6 @@ unpin_src: return dst; } -/** - * intel_engine_needs_cmd_parser() - should a given engine use software - * command parsing? - * @engine: the engine in question - * - * Only certain platforms require software batch buffer command parsing, and - * only when enabled via module parameter. 
- * - * Return: true if the engine requires software command parsing - */ -bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine) -{ - if (!engine->needs_cmd_parser) - return false; - - if (!USES_PPGTT(engine->i915)) - return false; - - return (i915.enable_cmd_parser == 1); -} - static bool check_cmd(const struct intel_engine_cs *engine, const struct drm_i915_cmd_descriptor *desc, const u32 *cmd, u32 length, @@ -1375,7 +1354,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) /* If the command parser is not enabled, report 0 - unsupported */ for_each_engine(engine, dev_priv, id) { - if (intel_engine_needs_cmd_parser(engine)) { + if (engine->needs_cmd_parser) { active = true; break; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 01f506728205..1ec96194ce46 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3452,7 +3452,6 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type); int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); -bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine); int intel_engine_cmd_parser(struct intel_engine_cs *engine, struct drm_i915_gem_object *batch_obj, struct drm_i915_gem_object *shadow_batch_obj, diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 522ecfb4dc9d..985142cc6cc9 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1713,7 +1713,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } params->args_batch_start_offset = args->batch_start_offset; - if (intel_engine_needs_cmd_parser(engine) && args->batch_len) { + if (engine->needs_cmd_parser && args->batch_len) { struct i915_vma *vma; vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry, diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index d46ffe7086bc..0e280fbd52f1 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -50,7 +50,7 @@ struct i915_params i915 __read_mostly = { .error_capture = true, .invert_brightness = 0, .disable_display = 0, - .enable_cmd_parser = 1, + .enable_cmd_parser = true, .use_mmio_flip = 0, .mmio_debug = 0, .verbose_state_checks = 1, @@ -188,9 +188,9 @@ MODULE_PARM_DESC(invert_brightness, module_param_named(disable_display, i915.disable_display, bool, 0400); MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); -module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); +module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400); MODULE_PARM_DESC(enable_cmd_parser, - "Enable command parsing (1=enabled [default], 0=disabled)"); + "Enable command parsing (true=enabled [default], false=disabled)"); module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600); MODULE_PARM_DESC(use_mmio_flip, diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 817ad959941e..8e433de04679 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -44,7 +44,6 @@ struct i915_params { int disable_power_well; int enable_ips; int invert_brightness; - int enable_cmd_parser; int enable_guc_loading; int enable_guc_submission; int guc_log_level; @@ -53,6 +52,7 @@ struct i915_params { 
int edp_vswing; unsigned int inject_load_failure; /* leave bools at the end to not create holes */ + bool enable_cmd_parser; bool enable_hangcheck; bool fastboot; bool prefault_disable; -- cgit v1.2.3 From 60abfbb86a8d51576f90c5adcbb4f547a2952782 Mon Sep 17 00:00:00 2001 From: Libin Yang Date: Fri, 11 Nov 2016 16:46:28 +0800 Subject: drm/i915/audio: fix hdmi audio noise issue Some monitors produce noise or even no sound after patch 6014ac12 is applied. That patch resets the CTS value to 0 for HDMI; however, what we actually need is to clear the "Enable CTS or M Prog" bit, which is the initial setting after HW reset. Fixes: 6014ac122ed0 ("drm/i915/audio: set proper N/M in modeset") Signed-off-by: Libin Yang Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/1478853988-139842-1-git-send-email-libin.yang@intel.com --- drivers/gpu/drm/i915/intel_audio.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 1c509f7410f5..49f10538d4aa 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -351,10 +351,13 @@ hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port, I915_WRITE(HSW_AUD_CFG(pipe), tmp); + /* + * Let's disable "Enable CTS or M Prog bit" + * and let HW calculate the value + */ tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(pipe)); - tmp &= ~AUD_CONFIG_M_MASK; + tmp &= ~AUD_M_CTS_M_PROG_ENABLE; tmp &= ~AUD_M_CTS_M_VALUE_INDEX; - tmp |= AUD_M_CTS_M_PROG_ENABLE; I915_WRITE(HSW_AUD_M_CTS_ENABLE(pipe), tmp); } -- cgit v1.2.3 From 4c266edb4c98e929ea8871525fa2c7bd2483e05f Mon Sep 17 00:00:00 2001 From: Joonas Lahtinen Date: Thu, 24 Nov 2016 14:47:49 +0000 Subject: drm/i915: Rename i915_gem_timeline.next_seqno to .seqno Rename the i915_gem_timeline member 'next_seqno' to 'seqno', as the variable is pre-incremented. We've already had two bugs due to the confusing name; the second is fixed in a follow-up patch. Cc: Chris Wilson Signed-off-by: Joonas Lahtinen Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20161124144750.2610-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 4 ++-- drivers/gpu/drm/i915/i915_gem_request.c | 14 +++++++------- drivers/gpu/drm/i915/i915_gem_timeline.h | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 0c6323e0f9f9..66067c439935 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -552,7 +552,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed?
%d\n", engine->name, work->flip_queued_req->global_seqno, - atomic_read(&dev_priv->gt.global_timeline.next_seqno), + atomic_read(&dev_priv->gt.global_timeline.seqno), intel_engine_get_seqno(engine), i915_gem_request_completed(work->flip_queued_req)); } else @@ -1026,7 +1026,7 @@ i915_next_seqno_get(void *data, u64 *val) { struct drm_i915_private *dev_priv = data; - *val = 1 + atomic_read(&dev_priv->gt.global_timeline.next_seqno); + *val = 1 + atomic_read(&dev_priv->gt.global_timeline.seqno); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 9f37eaa3723a..82904595eaae 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -330,11 +330,11 @@ static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno) GEM_BUG_ON(i915->gt.active_requests > 1); /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ - if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) { + if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) { while (intel_breadcrumbs_busy(i915)) cond_resched(); /* spin until threads are complete */ } - atomic_set(&timeline->next_seqno, seqno); + atomic_set(&timeline->seqno, seqno); /* Finally reset hw state */ for_each_engine(engine, i915, id) @@ -369,11 +369,11 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno) static int reserve_global_seqno(struct drm_i915_private *i915) { u32 active_requests = ++i915->gt.active_requests; - u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno); + u32 seqno = atomic_read(&i915->gt.global_timeline.seqno); int ret; /* Reservation is fine until we need to wrap around */ - if (likely(next_seqno + active_requests > next_seqno)) + if (likely(seqno + active_requests > seqno)) return 0; ret = i915_gem_init_global_seqno(i915, 0); @@ -387,13 +387,13 @@ static int reserve_global_seqno(struct drm_i915_private *i915) static u32 __timeline_get_seqno(struct i915_gem_timeline *tl) { - /* next_seqno only incremented under a mutex */ - return ++tl->next_seqno.counter; + /* seqno only incremented under a mutex */ + return ++tl->seqno.counter; } static u32 timeline_get_seqno(struct i915_gem_timeline *tl) { - return atomic_inc_return(&tl->next_seqno); + return atomic_inc_return(&tl->seqno); } void __i915_gem_request_submit(struct drm_i915_gem_request *request) diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h index 98d99a62b4ae..f2e51f42cc2f 100644 --- a/drivers/gpu/drm/i915/i915_gem_timeline.h +++ b/drivers/gpu/drm/i915/i915_gem_timeline.h @@ -56,7 +56,7 @@ struct intel_timeline { struct i915_gem_timeline { struct list_head link; - atomic_t next_seqno; + atomic_t seqno; struct drm_i915_private *i915; const char *name; -- cgit v1.2.3 From 312c3c47ef769f1d3f2a0d4e5db78a6d16fe0b26 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 24 Nov 2016 14:47:50 +0000 Subject: drm/i915/debugfs: Update pageflip information Show the last submitted seqno to the engine, not the overall next seqno, as this is more pertinent information when inspecting the pageflip and whether the CS or display engine stalled. 
Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20161124144750.2610-2-chris@chris-wilson.co.uk Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_debugfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 66067c439935..4a431d9c700c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -549,10 +549,10 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) if (work->flip_queued_req) { struct intel_engine_cs *engine = work->flip_queued_req->engine; - seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n", + seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n", engine->name, work->flip_queued_req->global_seqno, - atomic_read(&dev_priv->gt.global_timeline.seqno), + intel_engine_last_submit(engine), intel_engine_get_seqno(engine), i915_gem_request_completed(work->flip_queued_req)); } else -- cgit v1.2.3 From 4ffd6e0cfe1816fced8ab8d2612e1edde81aac2a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 25 Nov 2016 13:17:15 +0000 Subject: drm/i915: Add is-completed assert to request retire entrypoint While we already check that the request is completed prior to it being retired, placing an assert that the request is complete at the entrypoint of the function documents the function's preconditions more clearly. Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20161125131718.20978-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_request.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 82904595eaae..bd7b21f70283 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -281,6 +281,8 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req) struct drm_i915_gem_request *tmp; lockdep_assert_held(&req->i915->drm.struct_mutex); + GEM_BUG_ON(!i915_gem_request_completed(req)); + if (list_empty(&req->link)) return; -- cgit v1.2.3 From 1618bdb89b5d8b47edf42d9c6ea96ecf001ad625 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 25 Nov 2016 13:17:16 +0000 Subject: drm/i915: Assert no external observers when unwinding a failed request alloc Before we return the request to the kmem_cache after a failed i915_gem_request_alloc(), we should assert that it has not been added to any global state tracking.
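The same pattern in a stand-alone sketch (made-up names, not the driver's code): an error path that frees a partially constructed object first asserts that the object was never linked into any shared tracking list.

#include <assert.h>
#include <stdlib.h>

/* Minimal stand-in for the kernel's list_head/list_empty() semantics. */
struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *n) { n->prev = n->next = n; }
static int list_is_empty(const struct list_node *n) { return n->next == n; }

struct request {
	struct list_node active_link;	/* link into a global tracking list */
};

static void request_free(struct request *rq)
{
	/* Freeing is only legal while the request is still private to us. */
	assert(list_is_empty(&rq->active_link));
	free(rq);
}

int main(void)
{
	struct request *rq = malloc(sizeof(*rq));
	if (!rq)
		return 1;
	list_init(&rq->active_link);
	/* ...construction fails before the request is published anywhere... */
	request_free(rq);
	return 0;
}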
Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20161125131718.20978-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_request.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index bd7b21f70283..ed6cead22a86 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -599,6 +599,11 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, return req; err_ctx: + /* Make sure we didn't add ourselves to external state before freeing */ + GEM_BUG_ON(!list_empty(&req->active_list)); + GEM_BUG_ON(!list_empty(&req->priotree.signalers_list)); + GEM_BUG_ON(!list_empty(&req->priotree.waiters_list)); + i915_gem_context_put(ctx); kmem_cache_free(dev_priv->requests, req); err_unreserve: -- cgit v1.2.3 From 48bc2a4a427ad81578f887d71d45794619a77211 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 25 Nov 2016 13:17:17 +0000 Subject: drm/i915: Hold a reference on the request for its fence chain Currently, we have an active reference for the request until it is retired. Though it cannot be retired before it has been executed by hardware, the request may be completed before we have finished processing the execute fence, i.e. we may continue to process that fence as we free the request. Fixes: 5590af3e115a ("drm/i915: Drive request submission through fence callbacks") Fixes: 23902e49c999 ("drm/i915: Split request submit/execute phase into two") Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20161125131718.20978-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_request.c | 34 ++++++++++++++++++++++++++------- drivers/gpu/drm/i915/i915_sw_fence.h | 5 +++++ 2 files changed, 32 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index ed6cead22a86..94d71b639f12 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -200,8 +200,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) struct i915_gem_active *active, *next; lockdep_assert_held(&request->i915->drm.struct_mutex); - GEM_BUG_ON(!i915_sw_fence_done(&request->submit)); - GEM_BUG_ON(!i915_sw_fence_done(&request->execute)); + GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit)); + GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute)); GEM_BUG_ON(!i915_gem_request_completed(request)); GEM_BUG_ON(!request->i915->gt.active_requests); @@ -451,11 +451,17 @@ void i915_gem_request_submit(struct drm_i915_gem_request *request) static int __i915_sw_fence_call submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { - if (state == FENCE_COMPLETE) { - struct drm_i915_gem_request *request = - container_of(fence, typeof(*request), submit); + struct drm_i915_gem_request *request = + container_of(fence, typeof(*request), submit); + switch (state) { + case FENCE_COMPLETE: request->engine->submit_request(request); + break; + + case FENCE_FREE: + i915_gem_request_put(request); + break; } return NOTIFY_DONE; @@ -464,6 +470,18 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) static int __i915_sw_fence_call execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { + struct drm_i915_gem_request *request = + container_of(fence, typeof(*request), 
execute); + + switch (state) { + case FENCE_COMPLETE: + break; + + case FENCE_FREE: + i915_gem_request_put(request); + break; + } + return NOTIFY_DONE; } @@ -551,8 +569,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, req->timeline->fence_context, __timeline_get_seqno(req->timeline->common)); - i915_sw_fence_init(&req->submit, submit_notify); - i915_sw_fence_init(&req->execute, execute_notify); + /* We bump the ref for the fence chain */ + i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify); + i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify); + /* Ensure that the execute fence completes after the submit fence - * as we complete the execute fence from within the submit fence * callback, its completion would otherwise be visible first. diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 7508d23f823b..0f3185ef7f4e 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -75,6 +75,11 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, unsigned long timeout, gfp_t gfp); +static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence) +{ + return atomic_read(&fence->pending) <= 0; +} + static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence) { return atomic_read(&fence->pending) < 0; -- cgit v1.2.3 From fc1584059d6c438b1febafa1c207ae1d3c6643e8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 25 Nov 2016 13:17:18 +0000 Subject: drm/i915: Integrate i915_sw_fence with debugobjects Add the tracking required to enable debugobjects for fences to improve error detection in BAT. The debugobject interface lets us track the lifetime and phases of the fences even while being embedded into larger structs, i.e. to check they are not used after they have been released. v2: Don't populate the stubs, debugobjects checks for a NULL pointer and treats it equivalently. Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20161125131718.20978-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Kconfig.debug | 13 +++ drivers/gpu/drm/i915/i915_gem_request.c | 9 ++ drivers/gpu/drm/i915/i915_sw_fence.c | 140 ++++++++++++++++++++++++++++++-- drivers/gpu/drm/i915/i915_sw_fence.h | 6 ++ 4 files changed, 161 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 51ba630a134b..a6c69b8cb1d2 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -22,6 +22,7 @@ config DRM_I915_DEBUG select X86_MSR # used by igt/pm_rpm select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) select DRM_DEBUG_MM if DRM=y + select DRM_I915_SW_FENCE_DEBUG_OBJECTS if DRM_I915=y default n help Choose this option to turn on extra driver debugging that may affect @@ -43,3 +44,15 @@ config DRM_I915_DEBUG_GEM If in doubt, say "N". +config DRM_I915_SW_FENCE_DEBUG_OBJECTS + bool "Enable additional driver debugging for fence objects" + depends on DRM_I915=y + select DEBUG_OBJECTS + default n + help + Choose this option to turn on extra driver debugging that may affect + performance but will catch some internal issues. + + Recommended for driver developers only. + + If in doubt, say "N". 
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 94d71b639f12..fcf22b0e2967 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -62,6 +62,15 @@ static void i915_fence_release(struct dma_fence *fence) { struct drm_i915_gem_request *req = to_request(fence); + /* The request is put onto a RCU freelist (i.e. the address + * is immediately reused), mark the fences as being freed now. + * Otherwise the debugobjects for the fences are only marked as + * freed when the slab cache itself is freed, and so we would get + * caught trying to reuse dead objects. + */ + i915_sw_fence_fini(&req->submit); + i915_sw_fence_fini(&req->execute); + kmem_cache_free(req->i915->requests, req); } diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 147420ccf49c..f5a88092dacf 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -17,6 +17,92 @@ static DEFINE_SPINLOCK(i915_sw_fence_lock); +enum { + DEBUG_FENCE_IDLE = 0, + DEBUG_FENCE_NOTIFY, +}; + +#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS + +static void *i915_sw_fence_debug_hint(void *addr) +{ + return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK); +} + +static struct debug_obj_descr i915_sw_fence_debug_descr = { + .name = "i915_sw_fence", + .debug_hint = i915_sw_fence_debug_hint, +}; + +static inline void debug_fence_init(struct i915_sw_fence *fence) +{ + debug_object_init(fence, &i915_sw_fence_debug_descr); +} + +static inline void debug_fence_activate(struct i915_sw_fence *fence) +{ + debug_object_activate(fence, &i915_sw_fence_debug_descr); +} + +static inline void debug_fence_set_state(struct i915_sw_fence *fence, + int old, int new) +{ + debug_object_active_state(fence, &i915_sw_fence_debug_descr, old, new); +} + +static inline void debug_fence_deactivate(struct i915_sw_fence *fence) +{ + debug_object_deactivate(fence, &i915_sw_fence_debug_descr); +} + +static inline void debug_fence_destroy(struct i915_sw_fence *fence) +{ + debug_object_destroy(fence, &i915_sw_fence_debug_descr); +} + +static inline void debug_fence_free(struct i915_sw_fence *fence) +{ + debug_object_free(fence, &i915_sw_fence_debug_descr); +} + +static inline void debug_fence_assert(struct i915_sw_fence *fence) +{ + debug_object_assert_init(fence, &i915_sw_fence_debug_descr); +} + +#else + +static inline void debug_fence_init(struct i915_sw_fence *fence) +{ +} + +static inline void debug_fence_activate(struct i915_sw_fence *fence) +{ +} + +static inline void debug_fence_set_state(struct i915_sw_fence *fence, + int old, int new) +{ +} + +static inline void debug_fence_deactivate(struct i915_sw_fence *fence) +{ +} + +static inline void debug_fence_destroy(struct i915_sw_fence *fence) +{ +} + +static inline void debug_fence_free(struct i915_sw_fence *fence) +{ +} + +static inline void debug_fence_assert(struct i915_sw_fence *fence) +{ +} + +#endif + static int __i915_sw_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { @@ -26,25 +112,37 @@ static int __i915_sw_fence_notify(struct i915_sw_fence *fence, return fn(fence, state); } -static void i915_sw_fence_free(struct kref *kref) +#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS +void i915_sw_fence_fini(struct i915_sw_fence *fence) +{ + debug_fence_free(fence); +} +#endif + +static void i915_sw_fence_release(struct kref *kref) { struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref); 
WARN_ON(atomic_read(&fence->pending) > 0); + debug_fence_destroy(fence); - if (fence->flags & I915_SW_FENCE_MASK) + if (fence->flags & I915_SW_FENCE_MASK) { __i915_sw_fence_notify(fence, FENCE_FREE); - else + } else { + i915_sw_fence_fini(fence); kfree(fence); + } } static void i915_sw_fence_put(struct i915_sw_fence *fence) { - kref_put(&fence->kref, i915_sw_fence_free); + debug_fence_assert(fence); + kref_put(&fence->kref, i915_sw_fence_release); } static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence) { + debug_fence_assert(fence); kref_get(&fence->kref); return fence; } @@ -56,6 +154,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence, wait_queue_t *pos, *next; unsigned long flags; + debug_fence_deactivate(fence); atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */ /* @@ -88,23 +187,33 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence, } while (1); } spin_unlock_irqrestore(&x->lock, flags); + + debug_fence_assert(fence); } static void __i915_sw_fence_complete(struct i915_sw_fence *fence, struct list_head *continuation) { + debug_fence_assert(fence); + if (!atomic_dec_and_test(&fence->pending)) return; + debug_fence_set_state(fence, DEBUG_FENCE_IDLE, DEBUG_FENCE_NOTIFY); + if (fence->flags & I915_SW_FENCE_MASK && __i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE) return; + debug_fence_set_state(fence, DEBUG_FENCE_NOTIFY, DEBUG_FENCE_IDLE); + __i915_sw_fence_wake_up_all(fence, continuation); } static void i915_sw_fence_complete(struct i915_sw_fence *fence) { + debug_fence_assert(fence); + if (WARN_ON(i915_sw_fence_done(fence))) return; @@ -113,6 +222,7 @@ static void i915_sw_fence_complete(struct i915_sw_fence *fence) static void i915_sw_fence_await(struct i915_sw_fence *fence) { + debug_fence_assert(fence); WARN_ON(atomic_inc_return(&fence->pending) <= 1); } @@ -123,18 +233,26 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence, { BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK); + debug_fence_init(fence); + __init_waitqueue_head(&fence->wait, name, key); kref_init(&fence->kref); atomic_set(&fence->pending, 1); fence->flags = (unsigned long)fn; } -void i915_sw_fence_commit(struct i915_sw_fence *fence) +static void __i915_sw_fence_commit(struct i915_sw_fence *fence) { i915_sw_fence_complete(fence); i915_sw_fence_put(fence); } +void i915_sw_fence_commit(struct i915_sw_fence *fence) +{ + debug_fence_activate(fence); + __i915_sw_fence_commit(fence); +} + static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key) { list_del(&wq->task_list); @@ -206,9 +324,13 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, unsigned long flags; int pending; + debug_fence_assert(fence); + if (i915_sw_fence_done(signaler)) return 0; + debug_fence_assert(signaler); + /* The dependency graph must be acyclic. 
*/ if (unlikely(i915_sw_fence_check_if_after(fence, signaler))) return -EINVAL; @@ -279,7 +401,7 @@ static void timer_i915_sw_fence_wake(unsigned long data) dma_fence_put(cb->dma); cb->dma = NULL; - i915_sw_fence_commit(cb->fence); + __i915_sw_fence_commit(cb->fence); cb->timer.function = NULL; } @@ -290,7 +412,7 @@ static void dma_i915_sw_fence_wake(struct dma_fence *dma, del_timer_sync(&cb->timer); if (cb->timer.function) - i915_sw_fence_commit(cb->fence); + __i915_sw_fence_commit(cb->fence); dma_fence_put(cb->dma); kfree(cb); @@ -304,6 +426,8 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, struct i915_sw_dma_fence_cb *cb; int ret; + debug_fence_assert(fence); + if (dma_fence_is_signaled(dma)) return 0; @@ -349,6 +473,8 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, struct dma_fence *excl; int ret = 0, pending; + debug_fence_assert(fence); + if (write) { struct dma_fence **shared; unsigned int count, i; diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 0f3185ef7f4e..d31cefbbcc04 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -56,6 +56,12 @@ do { \ __i915_sw_fence_init((fence), (fn), NULL, NULL) #endif +#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS +void i915_sw_fence_fini(struct i915_sw_fence *fence); +#else +static inline void i915_sw_fence_fini(struct i915_sw_fence *fence) {} +#endif + void i915_sw_fence_commit(struct i915_sw_fence *fence); int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, -- cgit v1.2.3 From c39055b072f8b1c403aa7967e8efbb1a1a59b348 Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Wed, 23 Nov 2016 16:21:44 +0200 Subject: drm/i915: Pass dev_priv to intel_setup_outputs() Pass dev_priv to intel_setup_outputs() and functions called by it, since those are all intel i915 specific functions. Also, in the majority of the functions dev_priv is used more often than dev. In the rare cases where there are a few calls back into drm core, a local dev variable was added. v2: Don't convert dev to &dev_priv->drm in intel_dsi_init.
(Ville) Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1479910904-11005-1-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 4 +- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/intel_crt.c | 7 +- drivers/gpu/drm/i915/intel_ddi.c | 5 +- drivers/gpu/drm/i915/intel_display.c | 120 ++++++++++++++++------------------- drivers/gpu/drm/i915/intel_dp.c | 8 +-- drivers/gpu/drm/i915/intel_drv.h | 22 ++++--- drivers/gpu/drm/i915/intel_dsi.c | 4 +- drivers/gpu/drm/i915/intel_dvo.c | 9 ++- drivers/gpu/drm/i915/intel_hdmi.c | 8 +-- drivers/gpu/drm/i915/intel_lvds.c | 4 +- drivers/gpu/drm/i915/intel_psr.c | 4 +- drivers/gpu/drm/i915/intel_sdvo.c | 19 +++--- drivers/gpu/drm/i915/intel_tv.c | 4 +- 14 files changed, 105 insertions(+), 115 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b893e67b4897..0fba4bb5655e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1579,7 +1579,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_pps_unlock_regs_wa(dev_priv); intel_opregion_setup(dev_priv); - intel_init_pch_refclk(dev); + intel_init_pch_refclk(dev_priv); drm_mode_config_reset(dev); /* @@ -2418,7 +2418,7 @@ static int intel_runtime_resume(struct device *kdev) intel_guc_resume(dev); if (IS_GEN6(dev_priv)) - intel_init_pch_refclk(dev); + intel_init_pch_refclk(dev_priv); if (IS_BROXTON(dev_priv)) { bxt_disable_dc9(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1ec96194ce46..0fbc31134561 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3570,7 +3570,7 @@ extern void intel_display_resume(struct drm_device *dev); extern void i915_redisable_vga(struct drm_i915_private *dev_priv); extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); -extern void intel_init_pch_refclk(struct drm_device *dev); +extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv); extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val); extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 86ecec5601d4..385e29af8baa 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -836,12 +836,11 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { .destroy = intel_encoder_destroy, }; -void intel_crt_init(struct drm_device *dev) +void intel_crt_init(struct drm_i915_private *dev_priv) { struct drm_connector *connector; struct intel_crt *crt; struct intel_connector *intel_connector; - struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t adpa_reg; u32 adpa; @@ -881,10 +880,10 @@ void intel_crt_init(struct drm_device *dev) connector = &intel_connector->base; crt->connector = intel_connector; - drm_connector_init(dev, &intel_connector->base, + drm_connector_init(&dev_priv->drm, &intel_connector->base, &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); - drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, + drm_encoder_init(&dev_priv->drm, &crt->base.base, &intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, "CRT"); intel_connector_attach_encoder(intel_connector, &crt->base); diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c 
index 0d680dabac77..f8e939d17160 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -2155,9 +2155,8 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock) return pll; } -void intel_ddi_init(struct drm_device *dev, enum port port) +void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; @@ -2219,7 +2218,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_encoder = &intel_dig_port->base; encoder = &intel_encoder->base; - drm_encoder_init(dev, encoder, &intel_ddi_funcs, + drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port)); intel_encoder->compute_config = intel_ddi_compute_config; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d51d885b2584..972c7e4ebf03 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8897,9 +8897,8 @@ out: return ret; } -static void ironlake_init_pch_refclk(struct drm_device *dev) +static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; int i; u32 val, final; @@ -8911,7 +8910,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) bool using_ssc_source = false; /* We need to take the global config into account */ - for_each_intel_encoder(dev, encoder) { + for_each_intel_encoder(&dev_priv->drm, encoder) { switch (encoder->type) { case INTEL_OUTPUT_LVDS: has_panel = true; @@ -9167,10 +9166,9 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) * - Sequence to enable CLKOUT_DP without spread * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O */ -static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, - bool with_fdi) +static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, + bool with_spread, bool with_fdi) { - struct drm_i915_private *dev_priv = to_i915(dev); uint32_t reg, tmp; if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) @@ -9208,9 +9206,8 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, } /* Sequence to disable CLKOUT_DP */ -static void lpt_disable_clkout_dp(struct drm_device *dev) +static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); uint32_t reg, tmp; mutex_lock(&dev_priv->sb_lock); @@ -9295,12 +9292,12 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) #undef BEND_IDX -static void lpt_init_pch_refclk(struct drm_device *dev) +static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; bool has_vga = false; - for_each_intel_encoder(dev, encoder) { + for_each_intel_encoder(&dev_priv->drm, encoder) { switch (encoder->type) { case INTEL_OUTPUT_ANALOG: has_vga = true; @@ -9311,24 +9308,22 @@ static void lpt_init_pch_refclk(struct drm_device *dev) } if (has_vga) { - lpt_bend_clkout_dp(to_i915(dev), 0); - lpt_enable_clkout_dp(dev, true, true); + lpt_bend_clkout_dp(dev_priv, 0); + lpt_enable_clkout_dp(dev_priv, true, true); } else { - lpt_disable_clkout_dp(dev); + lpt_disable_clkout_dp(dev_priv); } } /* * Initialize reference clocks when the driver loads */ -void intel_init_pch_refclk(struct drm_device *dev) +void 
intel_init_pch_refclk(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) - ironlake_init_pch_refclk(dev); + ironlake_init_pch_refclk(dev_priv); else if (HAS_PCH_LPT(dev_priv)) - lpt_init_pch_refclk(dev); + lpt_init_pch_refclk(dev_priv); } static void ironlake_set_pipeconf(struct drm_crtc *crtc) @@ -10175,7 +10170,6 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) */ void hsw_enable_pc8(struct drm_i915_private *dev_priv) { - struct drm_device *dev = &dev_priv->drm; uint32_t val; DRM_DEBUG_KMS("Enabling package C8+\n"); @@ -10186,19 +10180,18 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv) I915_WRITE(SOUTH_DSPCLK_GATE_D, val); } - lpt_disable_clkout_dp(dev); + lpt_disable_clkout_dp(dev_priv); hsw_disable_lcpll(dev_priv, true, true); } void hsw_disable_pc8(struct drm_i915_private *dev_priv) { - struct drm_device *dev = &dev_priv->drm; uint32_t val; DRM_DEBUG_KMS("Disabling package C8+\n"); hsw_restore_lcpll(dev_priv); - lpt_init_pch_refclk(dev); + lpt_init_pch_refclk(dev_priv); if (HAS_PCH_LPT_LP(dev_priv)) { val = I915_READ(SOUTH_DSPCLK_GATE_D); @@ -15494,9 +15487,8 @@ static void intel_pps_init(struct drm_i915_private *dev_priv) intel_pps_unlock_regs_wa(dev_priv); } -static void intel_setup_outputs(struct drm_device *dev) +static void intel_setup_outputs(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; bool dpd_is_edp = false; @@ -15507,10 +15499,10 @@ static void intel_setup_outputs(struct drm_device *dev) * prevent the registeration of both eDP and LVDS and the incorrect * sharing of the PPS. */ - intel_lvds_init(dev); + intel_lvds_init(dev_priv); if (intel_crt_present(dev_priv)) - intel_crt_init(dev); + intel_crt_init(dev_priv); if (IS_BROXTON(dev_priv)) { /* @@ -15518,11 +15510,11 @@ static void intel_setup_outputs(struct drm_device *dev) * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to * detect the ports. */ - intel_ddi_init(dev, PORT_A); - intel_ddi_init(dev, PORT_B); - intel_ddi_init(dev, PORT_C); + intel_ddi_init(dev_priv, PORT_A); + intel_ddi_init(dev_priv, PORT_B); + intel_ddi_init(dev_priv, PORT_C); - intel_dsi_init(dev); + intel_dsi_init(dev_priv); } else if (HAS_DDI(dev_priv)) { int found; @@ -15534,18 +15526,18 @@ static void intel_setup_outputs(struct drm_device *dev) found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; /* WaIgnoreDDIAStrap: skl */ if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) - intel_ddi_init(dev, PORT_A); + intel_ddi_init(dev_priv, PORT_A); /* DDI B, C and D detection is indicated by the SFUSE_STRAP * register */ found = I915_READ(SFUSE_STRAP); if (found & SFUSE_STRAP_DDIB_DETECTED) - intel_ddi_init(dev, PORT_B); + intel_ddi_init(dev_priv, PORT_B); if (found & SFUSE_STRAP_DDIC_DETECTED) - intel_ddi_init(dev, PORT_C); + intel_ddi_init(dev_priv, PORT_C); if (found & SFUSE_STRAP_DDID_DETECTED) - intel_ddi_init(dev, PORT_D); + intel_ddi_init(dev_priv, PORT_D); /* * On SKL we don't have a way to detect DDI-E so we rely on VBT. 
*/ @@ -15553,35 +15545,35 @@ static void intel_setup_outputs(struct drm_device *dev) (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) - intel_ddi_init(dev, PORT_E); + intel_ddi_init(dev_priv, PORT_E); } else if (HAS_PCH_SPLIT(dev_priv)) { int found; dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D); if (has_edp_a(dev_priv)) - intel_dp_init(dev, DP_A, PORT_A); + intel_dp_init(dev_priv, DP_A, PORT_A); if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { /* PCH SDVOB multiplex with HDMIB */ - found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B); + found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); if (!found) - intel_hdmi_init(dev, PCH_HDMIB, PORT_B); + intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) - intel_dp_init(dev, PCH_DP_B, PORT_B); + intel_dp_init(dev_priv, PCH_DP_B, PORT_B); } if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) - intel_hdmi_init(dev, PCH_HDMIC, PORT_C); + intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) - intel_hdmi_init(dev, PCH_HDMID, PORT_D); + intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D); if (I915_READ(PCH_DP_C) & DP_DETECTED) - intel_dp_init(dev, PCH_DP_C, PORT_C); + intel_dp_init(dev_priv, PCH_DP_C, PORT_C); if (I915_READ(PCH_DP_D) & DP_DETECTED) - intel_dp_init(dev, PCH_DP_D, PORT_D); + intel_dp_init(dev_priv, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { bool has_edp, has_port; @@ -15603,16 +15595,16 @@ static void intel_setup_outputs(struct drm_device *dev) has_edp = intel_dp_is_edp(dev_priv, PORT_B); has_port = intel_bios_is_port_present(dev_priv, PORT_B); if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) - has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); + has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) - intel_hdmi_init(dev, VLV_HDMIB, PORT_B); + intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); has_edp = intel_dp_is_edp(dev_priv, PORT_C); has_port = intel_bios_is_port_present(dev_priv, PORT_C); if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) - has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); + has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) - intel_hdmi_init(dev, VLV_HDMIC, PORT_C); + intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); if (IS_CHERRYVIEW(dev_priv)) { /* @@ -15621,63 +15613,63 @@ static void intel_setup_outputs(struct drm_device *dev) */ has_port = intel_bios_is_port_present(dev_priv, PORT_D); if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) - intel_dp_init(dev, CHV_DP_D, PORT_D); + intel_dp_init(dev_priv, CHV_DP_D, PORT_D); if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) - intel_hdmi_init(dev, CHV_HDMID, PORT_D); + intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); } - intel_dsi_init(dev); + intel_dsi_init(dev_priv); } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) { bool found = false; if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOB\n"); - found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B); + found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); if (!found && IS_G4X(dev_priv)) { DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); - intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); + intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); } if (!found && IS_G4X(dev_priv)) - intel_dp_init(dev, DP_B, PORT_B); + intel_dp_init(dev_priv, DP_B, PORT_B); } /* Before G4X SDVOC 
doesn't have its own detect register */ if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOC\n"); - found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C); + found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); } if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { if (IS_G4X(dev_priv)) { DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); - intel_hdmi_init(dev, GEN4_HDMIC, PORT_C); + intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); } if (IS_G4X(dev_priv)) - intel_dp_init(dev, DP_C, PORT_C); + intel_dp_init(dev_priv, DP_C, PORT_C); } if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) - intel_dp_init(dev, DP_D, PORT_D); + intel_dp_init(dev_priv, DP_D, PORT_D); } else if (IS_GEN2(dev_priv)) - intel_dvo_init(dev); + intel_dvo_init(dev_priv); if (SUPPORTS_TV(dev_priv)) - intel_tv_init(dev); + intel_tv_init(dev_priv); - intel_psr_init(dev); + intel_psr_init(dev_priv); - for_each_intel_encoder(dev, encoder) { + for_each_intel_encoder(&dev_priv->drm, encoder) { encoder->base.possible_crtcs = encoder->crtc_mask; encoder->base.possible_clones = intel_encoder_clones(encoder); } - intel_init_pch_refclk(dev); + intel_init_pch_refclk(dev_priv); - drm_helper_move_panel_connectors_to_head(dev); + drm_helper_move_panel_connectors_to_head(&dev_priv->drm); } static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) @@ -16532,7 +16524,7 @@ int intel_modeset_init(struct drm_device *dev) /* Just disable it once at startup */ i915_disable_vga(dev_priv); - intel_setup_outputs(dev); + intel_setup_outputs(dev_priv); drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 8026a83fec9e..9dfbde472419 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -5770,11 +5770,10 @@ fail: return false; } -bool intel_dp_init(struct drm_device *dev, +bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, enum port port) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; @@ -5791,8 +5790,9 @@ bool intel_dp_init(struct drm_device *dev, intel_encoder = &intel_dig_port->base; encoder = &intel_encoder->base; - if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, - DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port))) + if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, + &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, + "DP %c", port_name(port))) goto err_encoder_init; intel_encoder->compute_config = intel_dp_compute_config; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 9bbe5c53a272..2b71567b5292 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1151,7 +1151,7 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv); void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv); /* intel_crt.c */ -void intel_crt_init(struct drm_device *dev); +void intel_crt_init(struct drm_i915_private *dev_priv); void intel_crt_reset(struct drm_encoder *encoder); /* intel_ddi.c */ @@ -1162,7 +1162,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder, struct drm_connector_state *old_conn_state); void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder); void hsw_fdi_link_train(struct drm_crtc *crtc); -void intel_ddi_init(struct drm_device *dev, enum port port); +void intel_ddi_init(struct drm_i915_private *dev_priv, enum port 
port); enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder); bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc); @@ -1387,7 +1387,8 @@ void intel_csr_ucode_suspend(struct drm_i915_private *); void intel_csr_ucode_resume(struct drm_i915_private *); /* intel_dp.c */ -bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); +bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, + enum port port); bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); void intel_dp_set_link_params(struct intel_dp *intel_dp, @@ -1462,13 +1463,13 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); /* intel_dsi.c */ -void intel_dsi_init(struct drm_device *dev); +void intel_dsi_init(struct drm_i915_private *dev_priv); /* intel_dsi_dcs_backlight.c */ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); /* intel_dvo.c */ -void intel_dvo_init(struct drm_device *dev); +void intel_dvo_init(struct drm_i915_private *dev_priv); /* intel_hotplug.c */ void intel_hpd_poll_init(struct drm_i915_private *dev_priv); @@ -1532,7 +1533,8 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv); void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv); /* intel_hdmi.c */ -void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port); +void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, + enum port port); void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); @@ -1543,7 +1545,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); /* intel_lvds.c */ -void intel_lvds_init(struct drm_device *dev); +void intel_lvds_init(struct drm_i915_private *dev_priv); struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev); bool intel_is_dual_link_lvds(struct drm_device *dev); @@ -1616,7 +1618,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, void intel_psr_flush(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin); -void intel_psr_init(struct drm_device *dev); +void intel_psr_init(struct drm_i915_private *dev_priv); void intel_psr_single_frame_update(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits); @@ -1761,7 +1763,7 @@ static inline int intel_enable_rc6(void) } /* intel_sdvo.c */ -bool intel_sdvo_init(struct drm_device *dev, +bool intel_sdvo_init(struct drm_i915_private *dev_priv, i915_reg_t reg, enum port port); @@ -1776,7 +1778,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc); void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work); /* intel_tv.c */ -void intel_tv_init(struct drm_device *dev); +void intel_tv_init(struct drm_i915_private *dev_priv); /* intel_atomic.c */ int intel_connector_atomic_get_property(struct drm_connector *connector, diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index a35c1412e43b..3bc6213afd3e 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -1434,15 +1434,15 @@ static void 
intel_dsi_add_properties(struct intel_connector *connector) } } -void intel_dsi_init(struct drm_device *dev) +void intel_dsi_init(struct drm_i915_private *dev_priv) { + struct drm_device *dev = &dev_priv->drm; struct intel_dsi *intel_dsi; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; struct intel_connector *intel_connector; struct drm_connector *connector; struct drm_display_mode *scan, *fixed_mode = NULL; - struct drm_i915_private *dev_priv = to_i915(dev); enum port port; unsigned int i; diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 708645443046..50da89dcb92b 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -422,9 +422,8 @@ static enum port intel_dvo_port(i915_reg_t dvo_reg) return PORT_C; } -void intel_dvo_init(struct drm_device *dev) +void intel_dvo_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *intel_encoder; struct intel_dvo *intel_dvo; struct intel_connector *intel_connector; @@ -511,7 +510,7 @@ void intel_dvo_init(struct drm_device *dev) continue; port = intel_dvo_port(dvo->dvo_reg); - drm_encoder_init(dev, &intel_encoder->base, + drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_dvo_enc_funcs, encoder_type, "DVO %c", port_name(port)); @@ -523,14 +522,14 @@ void intel_dvo_init(struct drm_device *dev) case INTEL_DVO_CHIP_TMDS: intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) | (1 << INTEL_OUTPUT_DVO); - drm_connector_init(dev, connector, + drm_connector_init(&dev_priv->drm, connector, &intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_DVII); encoder_type = DRM_MODE_ENCODER_TMDS; break; case INTEL_DVO_CHIP_LVDS: intel_encoder->cloneable = 0; - drm_connector_init(dev, connector, + drm_connector_init(&dev_priv->drm, connector, &intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_LVDS); encoder_type = DRM_MODE_ENCODER_LVDS; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a12278740f95..374e38a4da43 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1933,10 +1933,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, } } -void intel_hdmi_init(struct drm_device *dev, +void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; @@ -1953,8 +1952,9 @@ void intel_hdmi_init(struct drm_device *dev, intel_encoder = &intel_dig_port->base; - drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, - DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port)); + drm_encoder_init(&dev_priv->drm, &intel_encoder->base, + &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS, + "HDMI %c", port_name(port)); intel_encoder->compute_config = intel_hdmi_compute_config; if (HAS_PCH_SPLIT(dev_priv)) { diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index d12ef0047d49..ea1ce17d8e4a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -971,9 +971,9 @@ static bool intel_lvds_supported(struct drm_i915_private *dev_priv) * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). 
*/ -void intel_lvds_init(struct drm_device *dev) +void intel_lvds_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_device *dev = &dev_priv->drm; struct intel_lvds_encoder *lvds_encoder; struct intel_encoder *intel_encoder; struct intel_lvds_connector *lvds_connector; diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 7b488e2793d9..5c3616e54577 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -818,10 +818,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, * This function is called only once at driver load to initialize basic * PSR stuff. */ -void intel_psr_init(struct drm_device *dev) +void intel_psr_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 27808e91cb5a..054acd974e2d 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2342,9 +2342,9 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) } static u8 -intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) +intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv, + struct intel_sdvo *sdvo) { - struct drm_i915_private *dev_priv = to_i915(dev); struct sdvo_device_mapping *my_mapping, *other_mapping; if (sdvo->port == PORT_B) { @@ -2934,9 +2934,9 @@ static const struct i2c_algorithm intel_sdvo_ddc_proxy = { static bool intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, - struct drm_device *dev) + struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev->pdev; + struct pci_dev *pdev = dev_priv->drm.pdev; sdvo->ddc.owner = THIS_MODULE; sdvo->ddc.class = I2C_CLASS_DDC; @@ -2957,10 +2957,9 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv, WARN_ON(port != PORT_B && port != PORT_C); } -bool intel_sdvo_init(struct drm_device *dev, +bool intel_sdvo_init(struct drm_i915_private *dev_priv, i915_reg_t sdvo_reg, enum port port) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *intel_encoder; struct intel_sdvo *intel_sdvo; int i; @@ -2973,16 +2972,18 @@ bool intel_sdvo_init(struct drm_device *dev, intel_sdvo->sdvo_reg = sdvo_reg; intel_sdvo->port = port; - intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; + intel_sdvo->slave_addr = + intel_sdvo_get_slave_addr(dev_priv, intel_sdvo) >> 1; intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo); - if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) + if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev_priv)) goto err_i2c_bus; /* encoder type will be decided later */ intel_encoder = &intel_sdvo->base; intel_encoder->type = INTEL_OUTPUT_SDVO; intel_encoder->port = port; - drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0, + drm_encoder_init(&dev_priv->drm, &intel_encoder->base, + &intel_sdvo_enc_funcs, 0, "SDVO %c", port_name(port)); /* Read the regs to test if we can talk to the device */ diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 78cdfc6833d6..eb692e4ffe01 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1537,9 +1537,9 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = { }; void -intel_tv_init(struct drm_device *dev) +intel_tv_init(struct drm_i915_private *dev_priv) { - struct 
drm_i915_private *dev_priv = to_i915(dev); + struct drm_device *dev = &dev_priv->drm; struct drm_connector *connector; struct intel_tv *intel_tv; struct intel_encoder *intel_encoder; -- cgit v1.2.3 From a3f79ca63b9bcf5a527b886953092bfd65e78940 Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Thu, 24 Nov 2016 15:23:27 +0200 Subject: drm/i915: Don't sanitize has_decoupled_mmio if platform is not broxton The check in __intel_uncore_early_sanitize() to disable decoupled mmio would disable it for every platform that is not broxton. While that's not a problem now since only broxton supports that, simply setting .has_decoupled_mmio in a new platform's device info wouldn't suffice. So avoid future confusion and change the workaround to only change the value of has_decoupled_mmio for broxton. v2: git add compile fix. (Ander) Cc: Praveen Paneri Cc: Tvrtko Ursulin Cc: Imre Deak Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Imre Deak Link: http://patchwork.freedesktop.org/patch/msgid/1479993807-29353-1-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_uncore.c | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0fbc31134561..c5349aaaf874 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2579,6 +2579,7 @@ intel_info(const struct drm_i915_private *dev_priv) #define BXT_REVID_A0 0x0 #define BXT_REVID_A1 0x1 #define BXT_REVID_B0 0x3 +#define BXT_REVID_B_LAST 0x8 #define BXT_REVID_C0 0x9 #define IS_BXT_REVID(dev_priv, since, until) \ diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index d7be0d94ba4d..07779d0c71e6 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -421,8 +421,7 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, GT_FIFO_CTL_RC6_POLICY_STALL); } - /* Enable Decoupled MMIO only on BXT C stepping onwards */ - if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST)) info->has_decoupled_mmio = false; intel_uncore_forcewake_reset(dev_priv, restore_forcewake); -- cgit v1.2.3 From 8c4f24f95c682352d20c0e7bf1878e390787a7eb Mon Sep 17 00:00:00 2001 From: Arkadiusz Hiler Date: Fri, 25 Nov 2016 18:59:33 +0100 Subject: drm/i915: Rename intel_guc.h to intel_uc.h GuC is not the only one micro controller we have. There are also HuC and DMC. Making the file more general will help with code organization. 
Cc: Chris Wilson Cc: Michal Winiarski Signed-off-by: Arkadiusz Hiler Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1480096777-12573-2-git-send-email-arkadiusz.hiler@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_guc_submission.c | 2 +- drivers/gpu/drm/i915/intel_guc.h | 192 ----------------------------- drivers/gpu/drm/i915/intel_guc_loader.c | 2 +- drivers/gpu/drm/i915/intel_uc.h | 192 +++++++++++++++++++++++++++++ 5 files changed, 195 insertions(+), 195 deletions(-) delete mode 100644 drivers/gpu/drm/i915/intel_guc.h create mode 100644 drivers/gpu/drm/i915/intel_uc.h (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c5349aaaf874..297ad03ab0c2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -55,7 +55,7 @@ #include "intel_bios.h" #include "intel_dpll_mgr.h" -#include "intel_guc.h" +#include "intel_uc.h" #include "intel_lrc.h" #include "intel_ringbuffer.h" diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 4462112725ef..e14220e961e5 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -26,7 +26,7 @@ #include #include #include "i915_drv.h" -#include "intel_guc.h" +#include "intel_uc.h" /** * DOC: GuC-based command submission diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h deleted file mode 100644 index 02337a81abc2..000000000000 --- a/drivers/gpu/drm/i915/intel_guc.h +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ -#ifndef _INTEL_GUC_H_ -#define _INTEL_GUC_H_ - -#include "intel_guc_fwif.h" -#include "i915_guc_reg.h" -#include "intel_ringbuffer.h" - -struct drm_i915_gem_request; - -/* - * This structure primarily describes the GEM object shared with the GuC. - * The GEM object is held for the entire lifetime of our interaction with - * the GuC, being allocated before the GuC is loaded with its firmware. - * Because there's no way to update the address used by the GuC after - * initialisation, the shared object must stay pinned into the GGTT as - * long as the GuC is in use. We also keep the first page (only) mapped - * into kernel address space, as it includes shared data that must be - * updated on every request submission. 
- * - * The single GEM object described here is actually made up of several - * separate areas, as far as the GuC is concerned. The first page (kept - * kmap'd) includes the "process decriptor" which holds sequence data for - * the doorbell, and one cacheline which actually *is* the doorbell; a - * write to this will "ring the doorbell" (i.e. send an interrupt to the - * GuC). The subsequent pages of the client object constitute the work - * queue (a circular array of work items), again described in the process - * descriptor. Work queue pages are mapped momentarily as required. - * - * We also keep a few statistics on failures. Ideally, these should all - * be zero! - * no_wq_space: times that the submission pre-check found no space was - * available in the work queue (note, the queue is shared, - * not per-engine). It is OK for this to be nonzero, but - * it should not be huge! - * q_fail: failed to enqueue a work item. This should never happen, - * because we check for space beforehand. - * b_fail: failed to ring the doorbell. This should never happen, unless - * somehow the hardware misbehaves, or maybe if the GuC firmware - * crashes? We probably need to reset the GPU to recover. - * retcode: errno from last guc_submit() - */ -struct i915_guc_client { - struct i915_vma *vma; - void *vaddr; - struct i915_gem_context *owner; - struct intel_guc *guc; - - uint32_t engines; /* bitmap of (host) engine ids */ - uint32_t priority; - uint32_t ctx_index; - uint32_t proc_desc_offset; - - uint32_t doorbell_offset; - uint32_t cookie; - uint16_t doorbell_id; - uint16_t padding[3]; /* Maintain alignment */ - - spinlock_t wq_lock; - uint32_t wq_offset; - uint32_t wq_size; - uint32_t wq_tail; - uint32_t wq_rsvd; - uint32_t no_wq_space; - uint32_t b_fail; - int retcode; - - /* Per-engine counts of GuC submissions */ - uint64_t submissions[I915_NUM_ENGINES]; -}; - -enum intel_guc_fw_status { - GUC_FIRMWARE_FAIL = -1, - GUC_FIRMWARE_NONE = 0, - GUC_FIRMWARE_PENDING, - GUC_FIRMWARE_SUCCESS -}; - -/* - * This structure encapsulates all the data needed during the process - * of fetching, caching, and loading the firmware image into the GuC. 
- */ -struct intel_guc_fw { - const char * guc_fw_path; - size_t guc_fw_size; - struct drm_i915_gem_object * guc_fw_obj; - enum intel_guc_fw_status guc_fw_fetch_status; - enum intel_guc_fw_status guc_fw_load_status; - - uint16_t guc_fw_major_wanted; - uint16_t guc_fw_minor_wanted; - uint16_t guc_fw_major_found; - uint16_t guc_fw_minor_found; - - uint32_t header_size; - uint32_t header_offset; - uint32_t rsa_size; - uint32_t rsa_offset; - uint32_t ucode_size; - uint32_t ucode_offset; -}; - -struct intel_guc_log { - uint32_t flags; - struct i915_vma *vma; - void *buf_addr; - struct workqueue_struct *flush_wq; - struct work_struct flush_work; - struct rchan *relay_chan; - - /* logging related stats */ - u32 capture_miss_count; - u32 flush_interrupt_count; - u32 prev_overflow_count[GUC_MAX_LOG_BUFFER]; - u32 total_overflow_count[GUC_MAX_LOG_BUFFER]; - u32 flush_count[GUC_MAX_LOG_BUFFER]; -}; - -struct intel_guc { - struct intel_guc_fw guc_fw; - struct intel_guc_log log; - - /* GuC2Host interrupt related state */ - bool interrupts_enabled; - - struct i915_vma *ads_vma; - struct i915_vma *ctx_pool_vma; - struct ida ctx_ids; - - struct i915_guc_client *execbuf_client; - - DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS); - uint32_t db_cacheline; /* Cyclic counter mod pagesize */ - - /* Action status & statistics */ - uint64_t action_count; /* Total commands issued */ - uint32_t action_cmd; /* Last command word */ - uint32_t action_status; /* Last return status */ - uint32_t action_fail; /* Total number of failures */ - int32_t action_err; /* Last error code */ - - uint64_t submissions[I915_NUM_ENGINES]; - uint32_t last_seqno[I915_NUM_ENGINES]; - - /* To serialize the Host2GuC actions */ - struct mutex action_lock; -}; - -/* intel_guc_loader.c */ -extern void intel_guc_init(struct drm_device *dev); -extern int intel_guc_setup(struct drm_device *dev); -extern void intel_guc_fini(struct drm_device *dev); -extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status); -extern int intel_guc_suspend(struct drm_device *dev); -extern int intel_guc_resume(struct drm_device *dev); - -/* i915_guc_submission.c */ -int i915_guc_submission_init(struct drm_i915_private *dev_priv); -int i915_guc_submission_enable(struct drm_i915_private *dev_priv); -int i915_guc_wq_reserve(struct drm_i915_gem_request *rq); -void i915_guc_wq_unreserve(struct drm_i915_gem_request *request); -void i915_guc_submission_disable(struct drm_i915_private *dev_priv); -void i915_guc_submission_fini(struct drm_i915_private *dev_priv); -void i915_guc_capture_logs(struct drm_i915_private *dev_priv); -void i915_guc_flush_logs(struct drm_i915_private *dev_priv); -void i915_guc_register(struct drm_i915_private *dev_priv); -void i915_guc_unregister(struct drm_i915_private *dev_priv); -int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val); - -#endif diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index e2d0bdaee0c8..27f6fd57eb79 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -28,7 +28,7 @@ */ #include #include "i915_drv.h" -#include "intel_guc.h" +#include "intel_uc.h" /** * DOC: GuC-specific firmware loader diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h new file mode 100644 index 000000000000..0d8a4939d5ad --- /dev/null +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -0,0 +1,192 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to 
any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ +#ifndef _INTEL_UC_H_ +#define _INTEL_UC_H_ + +#include "intel_guc_fwif.h" +#include "i915_guc_reg.h" +#include "intel_ringbuffer.h" + +struct drm_i915_gem_request; + +/* + * This structure primarily describes the GEM object shared with the GuC. + * The GEM object is held for the entire lifetime of our interaction with + * the GuC, being allocated before the GuC is loaded with its firmware. + * Because there's no way to update the address used by the GuC after + * initialisation, the shared object must stay pinned into the GGTT as + * long as the GuC is in use. We also keep the first page (only) mapped + * into kernel address space, as it includes shared data that must be + * updated on every request submission. + * + * The single GEM object described here is actually made up of several + * separate areas, as far as the GuC is concerned. The first page (kept + * kmap'd) includes the "process decriptor" which holds sequence data for + * the doorbell, and one cacheline which actually *is* the doorbell; a + * write to this will "ring the doorbell" (i.e. send an interrupt to the + * GuC). The subsequent pages of the client object constitute the work + * queue (a circular array of work items), again described in the process + * descriptor. Work queue pages are mapped momentarily as required. + * + * We also keep a few statistics on failures. Ideally, these should all + * be zero! + * no_wq_space: times that the submission pre-check found no space was + * available in the work queue (note, the queue is shared, + * not per-engine). It is OK for this to be nonzero, but + * it should not be huge! + * q_fail: failed to enqueue a work item. This should never happen, + * because we check for space beforehand. + * b_fail: failed to ring the doorbell. This should never happen, unless + * somehow the hardware misbehaves, or maybe if the GuC firmware + * crashes? We probably need to reset the GPU to recover. 
+ * retcode: errno from last guc_submit() + */ +struct i915_guc_client { + struct i915_vma *vma; + void *vaddr; + struct i915_gem_context *owner; + struct intel_guc *guc; + + uint32_t engines; /* bitmap of (host) engine ids */ + uint32_t priority; + uint32_t ctx_index; + uint32_t proc_desc_offset; + + uint32_t doorbell_offset; + uint32_t cookie; + uint16_t doorbell_id; + uint16_t padding[3]; /* Maintain alignment */ + + spinlock_t wq_lock; + uint32_t wq_offset; + uint32_t wq_size; + uint32_t wq_tail; + uint32_t wq_rsvd; + uint32_t no_wq_space; + uint32_t b_fail; + int retcode; + + /* Per-engine counts of GuC submissions */ + uint64_t submissions[I915_NUM_ENGINES]; +}; + +enum intel_guc_fw_status { + GUC_FIRMWARE_FAIL = -1, + GUC_FIRMWARE_NONE = 0, + GUC_FIRMWARE_PENDING, + GUC_FIRMWARE_SUCCESS +}; + +/* + * This structure encapsulates all the data needed during the process + * of fetching, caching, and loading the firmware image into the GuC. + */ +struct intel_guc_fw { + const char * guc_fw_path; + size_t guc_fw_size; + struct drm_i915_gem_object * guc_fw_obj; + enum intel_guc_fw_status guc_fw_fetch_status; + enum intel_guc_fw_status guc_fw_load_status; + + uint16_t guc_fw_major_wanted; + uint16_t guc_fw_minor_wanted; + uint16_t guc_fw_major_found; + uint16_t guc_fw_minor_found; + + uint32_t header_size; + uint32_t header_offset; + uint32_t rsa_size; + uint32_t rsa_offset; + uint32_t ucode_size; + uint32_t ucode_offset; +}; + +struct intel_guc_log { + uint32_t flags; + struct i915_vma *vma; + void *buf_addr; + struct workqueue_struct *flush_wq; + struct work_struct flush_work; + struct rchan *relay_chan; + + /* logging related stats */ + u32 capture_miss_count; + u32 flush_interrupt_count; + u32 prev_overflow_count[GUC_MAX_LOG_BUFFER]; + u32 total_overflow_count[GUC_MAX_LOG_BUFFER]; + u32 flush_count[GUC_MAX_LOG_BUFFER]; +}; + +struct intel_guc { + struct intel_guc_fw guc_fw; + struct intel_guc_log log; + + /* GuC2Host interrupt related state */ + bool interrupts_enabled; + + struct i915_vma *ads_vma; + struct i915_vma *ctx_pool_vma; + struct ida ctx_ids; + + struct i915_guc_client *execbuf_client; + + DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS); + uint32_t db_cacheline; /* Cyclic counter mod pagesize */ + + /* Action status & statistics */ + uint64_t action_count; /* Total commands issued */ + uint32_t action_cmd; /* Last command word */ + uint32_t action_status; /* Last return status */ + uint32_t action_fail; /* Total number of failures */ + int32_t action_err; /* Last error code */ + + uint64_t submissions[I915_NUM_ENGINES]; + uint32_t last_seqno[I915_NUM_ENGINES]; + + /* To serialize the Host2GuC actions */ + struct mutex action_lock; +}; + +/* intel_guc_loader.c */ +extern void intel_guc_init(struct drm_device *dev); +extern int intel_guc_setup(struct drm_device *dev); +extern void intel_guc_fini(struct drm_device *dev); +extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status); +extern int intel_guc_suspend(struct drm_device *dev); +extern int intel_guc_resume(struct drm_device *dev); + +/* i915_guc_submission.c */ +int i915_guc_submission_init(struct drm_i915_private *dev_priv); +int i915_guc_submission_enable(struct drm_i915_private *dev_priv); +int i915_guc_wq_reserve(struct drm_i915_gem_request *rq); +void i915_guc_wq_unreserve(struct drm_i915_gem_request *request); +void i915_guc_submission_disable(struct drm_i915_private *dev_priv); +void i915_guc_submission_fini(struct drm_i915_private *dev_priv); +void i915_guc_capture_logs(struct drm_i915_private 
*dev_priv); +void i915_guc_flush_logs(struct drm_i915_private *dev_priv); +void i915_guc_register(struct drm_i915_private *dev_priv); +void i915_guc_unregister(struct drm_i915_private *dev_priv); +int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val); + +#endif -- cgit v1.2.3 From a80bc45ff0cf9398484b836fc60d5e144970dc6d Mon Sep 17 00:00:00 2001 From: Arkadiusz Hiler Date: Fri, 25 Nov 2016 18:59:34 +0100 Subject: drm/i915/guc: Drop guc2host/host2guc from names To facilitate code reorganization we are renaming everything that contains guc2host or host2guc. host2guc_action() and host2guc_action_response() become guc_send() and guc_recv() respectively. Other host2guc_*() functions become simply guc_*(). Other entities are renamed basing on context they appear in: - HOST2GUC_ACTIONS_& become INTEL_GUC_ACTION_* - HOST2GUC_{INTERRUPT,TRIGGER} become GUC_SEND_{INTERRUPT,TRIGGER} - GUC2HOST_STATUS_* become INTEL_GUC_STATUS_* - GUC2HOST_MSG_* become INTEL_GUC_RECV_MSG_* - action_lock becomes send_mutex v2: drop unnecessary backslashes and use BIT() instead of '<<' v3: shortened enum names and INTEL_GUC_STATUS_* Cc: Chris Wilson Cc: Michal Winiarski Signed-off-by: Arkadiusz Hiler Link: http://patchwork.freedesktop.org/patch/msgid/1480096777-12573-3-git-send-email-arkadiusz.hiler@intel.com Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_guc_reg.h | 4 +- drivers/gpu/drm/i915/i915_guc_submission.c | 91 +++++++++++++++--------------- drivers/gpu/drm/i915/i915_irq.c | 4 +- drivers/gpu/drm/i915/intel_guc_fwif.h | 46 +++++++-------- drivers/gpu/drm/i915/intel_uc.h | 6 +- 5 files changed, 75 insertions(+), 76 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h index a47e1e4aec03..5e638fc37208 100644 --- a/drivers/gpu/drm/i915/i915_guc_reg.h +++ b/drivers/gpu/drm/i915/i915_guc_reg.h @@ -100,8 +100,8 @@ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \ GUC_ENABLE_MIA_CLOCK_GATING) -#define HOST2GUC_INTERRUPT _MMIO(0xc4c8) -#define HOST2GUC_TRIGGER (1<<0) +#define GUC_SEND_INTERRUPT _MMIO(0xc4c8) +#define GUC_SEND_TRIGGER (1<<0) #define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8) #define GEN8_DRB_VALID (1<<0) diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index e14220e961e5..69eba03b9823 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -49,7 +49,7 @@ * Firmware writes a success/fail code back to the action register after * processes the request. The kernel driver polls waiting for this update and * then proceeds. - * See host2guc_action() + * See guc_send() * * Doorbells: * Doorbells are interrupts to uKernel. 
A doorbell is a single cache line (QW) @@ -69,15 +69,14 @@ * Read GuC command/status register (SOFT_SCRATCH_0) * Return true if it contains a response rather than a command */ -static inline bool host2guc_action_response(struct drm_i915_private *dev_priv, - u32 *status) +static inline bool guc_recv(struct drm_i915_private *dev_priv, u32 *status) { u32 val = I915_READ(SOFT_SCRATCH(0)); *status = val; - return GUC2HOST_IS_RESPONSE(val); + return INTEL_GUC_RECV_IS_RESPONSE(val); } -static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len) +static int guc_send(struct intel_guc *guc, u32 *data, u32 len) { struct drm_i915_private *dev_priv = guc_to_i915(guc); u32 status; @@ -87,7 +86,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len) if (WARN_ON(len < 1 || len > 15)) return -EINVAL; - mutex_lock(&guc->action_lock); + mutex_lock(&guc->send_mutex); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); dev_priv->guc.action_count += 1; @@ -98,17 +97,17 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len) POSTING_READ(SOFT_SCRATCH(i - 1)); - I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER); + I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER); /* * Fast commands should complete in less than 10us, so sample quickly * up to that length of time, then switch to a slower sleep-wait loop. - * No HOST2GUC command should ever take longer than 10ms. + * No INTEL_GUC_ACTION command should ever take longer than 10ms. */ - ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10); + ret = wait_for_us(guc_recv(dev_priv, &status), 10); if (ret) - ret = wait_for(host2guc_action_response(dev_priv, &status), 10); - if (status != GUC2HOST_STATUS_SUCCESS) { + ret = wait_for(guc_recv(dev_priv, &status), 10); + if (status != INTEL_GUC_STATUS_SUCCESS) { /* * Either the GuC explicitly returned an error (which * we convert to -EIO here) or no response at all was @@ -126,7 +125,7 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len) dev_priv->guc.action_status = status; intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - mutex_unlock(&guc->action_lock); + mutex_unlock(&guc->send_mutex); return ret; } @@ -135,35 +134,35 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len) * Tell the GuC to allocate or deallocate a specific doorbell */ -static int host2guc_allocate_doorbell(struct intel_guc *guc, - struct i915_guc_client *client) +static int guc_allocate_doorbell(struct intel_guc *guc, + struct i915_guc_client *client) { u32 data[2]; - data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL; + data[0] = INTEL_GUC_ACTION_ALLOCATE_DOORBELL; data[1] = client->ctx_index; - return host2guc_action(guc, data, 2); + return guc_send(guc, data, 2); } -static int host2guc_release_doorbell(struct intel_guc *guc, - struct i915_guc_client *client) +static int guc_release_doorbell(struct intel_guc *guc, + struct i915_guc_client *client) { u32 data[2]; - data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL; + data[0] = INTEL_GUC_ACTION_DEALLOCATE_DOORBELL; data[1] = client->ctx_index; - return host2guc_action(guc, data, 2); + return guc_send(guc, data, 2); } -static int host2guc_sample_forcewake(struct intel_guc *guc, - struct i915_guc_client *client) +static int guc_sample_forcewake(struct intel_guc *guc, + struct i915_guc_client *client) { struct drm_i915_private *dev_priv = guc_to_i915(guc); u32 data[2]; - data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; + data[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE; /* WaRsDisableCoarsePowerGating:skl,bxt */ if 
(!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) data[1] = 0; @@ -171,36 +170,36 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, /* bit 0 and 1 are for Render and Media domain separately */ data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA; - return host2guc_action(guc, data, ARRAY_SIZE(data)); + return guc_send(guc, data, ARRAY_SIZE(data)); } -static int host2guc_logbuffer_flush_complete(struct intel_guc *guc) +static int guc_logbuffer_flush_complete(struct intel_guc *guc) { u32 data[1]; - data[0] = HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE; + data[0] = INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE; - return host2guc_action(guc, data, 1); + return guc_send(guc, data, 1); } -static int host2guc_force_logbuffer_flush(struct intel_guc *guc) +static int guc_force_logbuffer_flush(struct intel_guc *guc) { u32 data[2]; - data[0] = HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH; + data[0] = INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH; data[1] = 0; - return host2guc_action(guc, data, 2); + return guc_send(guc, data, 2); } -static int host2guc_logging_control(struct intel_guc *guc, u32 control_val) +static int guc_logging_control(struct intel_guc *guc, u32 control_val) { u32 data[2]; - data[0] = HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING; + data[0] = INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING; data[1] = control_val; - return host2guc_action(guc, data, 2); + return guc_send(guc, data, 2); } /* @@ -226,7 +225,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc, test_bit(client->doorbell_id, doorbell_bitmap)) { /* Deactivate the old doorbell */ doorbell->db_status = GUC_DOORBELL_DISABLED; - (void)host2guc_release_doorbell(guc, client); + (void)guc_release_doorbell(guc, client); __clear_bit(client->doorbell_id, doorbell_bitmap); } @@ -249,7 +248,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc, __set_bit(new_id, doorbell_bitmap); doorbell->cookie = 0; doorbell->db_status = GUC_DOORBELL_ENABLED; - return host2guc_allocate_doorbell(guc, client); + return guc_allocate_doorbell(guc, client); } static int guc_init_doorbell(struct intel_guc *guc, @@ -298,7 +297,7 @@ select_doorbell_register(struct intel_guc *guc, uint32_t priority) * Select, assign and relase doorbell cachelines * * These functions track which doorbell cachelines are in use. - * The data they manipulate is protected by the host2guc lock. + * The data they manipulate is protected by the guc_send lock. 
*/ static uint32_t select_doorbell_cacheline(struct intel_guc *guc) @@ -1500,7 +1499,7 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv) guc->ctx_pool_vma = vma; ida_init(&guc->ctx_ids); - mutex_init(&guc->action_lock); + mutex_init(&guc->send_mutex); guc_log_create(guc); guc_addon_create(guc); @@ -1526,7 +1525,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) } guc->execbuf_client = client; - host2guc_sample_forcewake(guc, client); + guc_sample_forcewake(guc, client); guc_init_doorbell_hw(guc); /* Take over from manual control of ELSP (execlists) */ @@ -1590,13 +1589,13 @@ int intel_guc_suspend(struct drm_device *dev) ctx = dev_priv->kernel_context; - data[0] = HOST2GUC_ACTION_ENTER_S_STATE; + data[0] = INTEL_GUC_ACTION_ENTER_S_STATE; /* any value greater than GUC_POWER_D0 */ data[1] = GUC_POWER_D1; /* first page is shared data with GuC */ data[2] = i915_ggtt_offset(ctx->engine[RCS].state); - return host2guc_action(guc, data, ARRAY_SIZE(data)); + return guc_send(guc, data, ARRAY_SIZE(data)); } @@ -1619,12 +1618,12 @@ int intel_guc_resume(struct drm_device *dev) ctx = dev_priv->kernel_context; - data[0] = HOST2GUC_ACTION_EXIT_S_STATE; + data[0] = INTEL_GUC_ACTION_EXIT_S_STATE; data[1] = GUC_POWER_D0; /* first page is shared data with GuC */ data[2] = i915_ggtt_offset(ctx->engine[RCS].state); - return host2guc_action(guc, data, ARRAY_SIZE(data)); + return guc_send(guc, data, ARRAY_SIZE(data)); } void i915_guc_capture_logs(struct drm_i915_private *dev_priv) @@ -1635,7 +1634,7 @@ void i915_guc_capture_logs(struct drm_i915_private *dev_priv) * time, so get/put should be really quick. */ intel_runtime_pm_get(dev_priv); - host2guc_logbuffer_flush_complete(&dev_priv->guc); + guc_logbuffer_flush_complete(&dev_priv->guc); intel_runtime_pm_put(dev_priv); } @@ -1653,7 +1652,7 @@ void i915_guc_flush_logs(struct drm_i915_private *dev_priv) flush_work(&dev_priv->guc.log.flush_work); /* Ask GuC to update the log buffer state */ - host2guc_force_logbuffer_flush(&dev_priv->guc); + guc_force_logbuffer_flush(&dev_priv->guc); /* GuC would have updated log buffer by now, so capture it */ i915_guc_capture_logs(dev_priv); @@ -1694,9 +1693,9 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) if (!log_param.logging_enabled && (i915.guc_log_level < 0)) return 0; - ret = host2guc_logging_control(&dev_priv->guc, log_param.value); + ret = guc_logging_control(&dev_priv->guc, log_param.value); if (ret < 0) { - DRM_DEBUG_DRIVER("host2guc action failed %d\n", ret); + DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 07ca71cabb2b..0b119b99cd9b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1683,8 +1683,8 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) u32 msg, flush; msg = I915_READ(SOFT_SCRATCH(15)); - flush = msg & (GUC2HOST_MSG_CRASH_DUMP_POSTED | - GUC2HOST_MSG_FLUSH_LOG_BUFFER); + flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED | + INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER); if (flush) { /* Clear the message bits that are handled */ I915_WRITE(SOFT_SCRATCH(15), msg & ~flush); diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 324ea902558b..00ca0df50f02 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -489,18 +489,18 @@ union guc_log_control { } __packed; /* This Action will 
be programmed in C180 - SOFT_SCRATCH_O_REG */ -enum host2guc_action { - HOST2GUC_ACTION_DEFAULT = 0x0, - HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6, - HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10, - HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, - HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, - HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302, - HOST2GUC_ACTION_ENTER_S_STATE = 0x501, - HOST2GUC_ACTION_EXIT_S_STATE = 0x502, - HOST2GUC_ACTION_SLPC_REQUEST = 0x3003, - HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000, - HOST2GUC_ACTION_LIMIT +enum intel_guc_action { + INTEL_GUC_ACTION_DEFAULT = 0x0, + INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x6, + INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10, + INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, + INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, + INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302, + INTEL_GUC_ACTION_ENTER_S_STATE = 0x501, + INTEL_GUC_ACTION_EXIT_S_STATE = 0x502, + INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003, + INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000, + INTEL_GUC_ACTION_LIMIT }; /* @@ -509,22 +509,22 @@ enum host2guc_action { * by the fact that all the MASK bits are set. The remaining bits * give more detail. */ -#define GUC2HOST_RESPONSE_MASK ((u32)0xF0000000) -#define GUC2HOST_IS_RESPONSE(x) ((u32)(x) >= GUC2HOST_RESPONSE_MASK) -#define GUC2HOST_STATUS(x) (GUC2HOST_RESPONSE_MASK | (x)) +#define INTEL_GUC_RECV_MASK ((u32)0xF0000000) +#define INTEL_GUC_RECV_IS_RESPONSE(x) ((u32)(x) >= INTEL_GUC_RECV_MASK) +#define INTEL_GUC_RECV_STATUS(x) (INTEL_GUC_RECV_MASK | (x)) /* GUC will return status back to SOFT_SCRATCH_O_REG */ -enum guc2host_status { - GUC2HOST_STATUS_SUCCESS = GUC2HOST_STATUS(0x0), - GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x10), - GUC2HOST_STATUS_DEALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x20), - GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000) +enum intel_guc_status { + INTEL_GUC_STATUS_SUCCESS = INTEL_GUC_RECV_STATUS(0x0), + INTEL_GUC_STATUS_ALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x10), + INTEL_GUC_STATUS_DEALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x20), + INTEL_GUC_STATUS_GENERIC_FAIL = INTEL_GUC_RECV_STATUS(0x0000F000) }; /* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */ -enum guc2host_message { - GUC2HOST_MSG_CRASH_DUMP_POSTED = (1 << 1), - GUC2HOST_MSG_FLUSH_LOG_BUFFER = (1 << 3) +enum intel_guc_recv_message { + INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1), + INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER = BIT(3) }; #endif diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index 0d8a4939d5ad..4b4a91e0bfd5 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -142,7 +142,7 @@ struct intel_guc { struct intel_guc_fw guc_fw; struct intel_guc_log log; - /* GuC2Host interrupt related state */ + /* intel_guc_recv interrupt related state */ bool interrupts_enabled; struct i915_vma *ads_vma; @@ -164,8 +164,8 @@ struct intel_guc { uint64_t submissions[I915_NUM_ENGINES]; uint32_t last_seqno[I915_NUM_ENGINES]; - /* To serialize the Host2GuC actions */ - struct mutex action_lock; + /* To serialize the intel_guc_send actions */ + struct mutex send_mutex; }; /* intel_guc_loader.c */ -- cgit v1.2.3 From 2d803c2de09ececa77395e48193fa51c4a2f8def Mon Sep 17 00:00:00 2001 From: Arkadiusz Hiler Date: Fri, 25 Nov 2016 18:59:35 +0100 Subject: drm/i915/guc: Move guc_{send,recv}() to intel_uc.c guc_send(), guc_recv() and related functions were introduced in the i915_guc_submission.c and their scope was limited only to that file. 
Those are not submission specific though. This patch moves them to intel_uc.c with intel_ prefix added. v2: rename intel_guc_log_* functions and clean up intel_guc_send usages Cc: Chris Wilson Cc: Michal Winiarski Signed-off-by: Arkadiusz Hiler Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1480096777-12573-4-git-send-email-arkadiusz.hiler@intel.com Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/Makefile | 3 +- drivers/gpu/drm/i915/i915_guc_submission.c | 147 ++++------------------------- drivers/gpu/drm/i915/intel_uc.c | 135 ++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_uc.h | 8 ++ 4 files changed, 163 insertions(+), 130 deletions(-) create mode 100644 drivers/gpu/drm/i915/intel_uc.c (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 580602da899a..3c30916727fb 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -55,7 +55,8 @@ i915-y += i915_cmd_parser.o \ intel_uncore.o # general-purpose microcontroller (GuC) support -i915-y += intel_guc_loader.o \ +i915-y += intel_uc.o \ + intel_guc_loader.o \ i915_guc_submission.o # autogenerated null render state diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 69eba03b9823..72fb525b9a8b 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -49,7 +49,7 @@ * Firmware writes a success/fail code back to the action register after * processes the request. The kernel driver polls waiting for this update and * then proceeds. - * See guc_send() + * See intel_guc_send() * * Doorbells: * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW) @@ -65,71 +65,6 @@ * */ -/* - * Read GuC command/status register (SOFT_SCRATCH_0) - * Return true if it contains a response rather than a command - */ -static inline bool guc_recv(struct drm_i915_private *dev_priv, u32 *status) -{ - u32 val = I915_READ(SOFT_SCRATCH(0)); - *status = val; - return INTEL_GUC_RECV_IS_RESPONSE(val); -} - -static int guc_send(struct intel_guc *guc, u32 *data, u32 len) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - u32 status; - int i; - int ret; - - if (WARN_ON(len < 1 || len > 15)) - return -EINVAL; - - mutex_lock(&guc->send_mutex); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - - dev_priv->guc.action_count += 1; - dev_priv->guc.action_cmd = data[0]; - - for (i = 0; i < len; i++) - I915_WRITE(SOFT_SCRATCH(i), data[i]); - - POSTING_READ(SOFT_SCRATCH(i - 1)); - - I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER); - - /* - * Fast commands should complete in less than 10us, so sample quickly - * up to that length of time, then switch to a slower sleep-wait loop. - * No INTEL_GUC_ACTION command should ever take longer than 10ms. 
- */ - ret = wait_for_us(guc_recv(dev_priv, &status), 10); - if (ret) - ret = wait_for(guc_recv(dev_priv, &status), 10); - if (status != INTEL_GUC_STATUS_SUCCESS) { - /* - * Either the GuC explicitly returned an error (which - * we convert to -EIO here) or no response at all was - * received within the timeout limit (-ETIMEDOUT) - */ - if (ret != -ETIMEDOUT) - ret = -EIO; - - DRM_WARN("Action 0x%X failed; ret=%d status=0x%08X response=0x%08X\n", - data[0], ret, status, I915_READ(SOFT_SCRATCH(15))); - - dev_priv->guc.action_fail += 1; - dev_priv->guc.action_err = ret; - } - dev_priv->guc.action_status = status; - - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - mutex_unlock(&guc->send_mutex); - - return ret; -} - /* * Tell the GuC to allocate or deallocate a specific doorbell */ @@ -137,69 +72,23 @@ static int guc_send(struct intel_guc *guc, u32 *data, u32 len) static int guc_allocate_doorbell(struct intel_guc *guc, struct i915_guc_client *client) { - u32 data[2]; + u32 action[] = { + INTEL_GUC_ACTION_ALLOCATE_DOORBELL, + client->ctx_index + }; - data[0] = INTEL_GUC_ACTION_ALLOCATE_DOORBELL; - data[1] = client->ctx_index; - - return guc_send(guc, data, 2); + return intel_guc_send(guc, action, ARRAY_SIZE(action)); } static int guc_release_doorbell(struct intel_guc *guc, struct i915_guc_client *client) { - u32 data[2]; - - data[0] = INTEL_GUC_ACTION_DEALLOCATE_DOORBELL; - data[1] = client->ctx_index; - - return guc_send(guc, data, 2); -} - -static int guc_sample_forcewake(struct intel_guc *guc, - struct i915_guc_client *client) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - u32 data[2]; - - data[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE; - /* WaRsDisableCoarsePowerGating:skl,bxt */ - if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) - data[1] = 0; - else - /* bit 0 and 1 are for Render and Media domain separately */ - data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA; - - return guc_send(guc, data, ARRAY_SIZE(data)); -} - -static int guc_logbuffer_flush_complete(struct intel_guc *guc) -{ - u32 data[1]; - - data[0] = INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE; - - return guc_send(guc, data, 1); -} - -static int guc_force_logbuffer_flush(struct intel_guc *guc) -{ - u32 data[2]; - - data[0] = INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH; - data[1] = 0; - - return guc_send(guc, data, 2); -} - -static int guc_logging_control(struct intel_guc *guc, u32 control_val) -{ - u32 data[2]; - - data[0] = INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING; - data[1] = control_val; + u32 action[] = { + INTEL_GUC_ACTION_DEALLOCATE_DOORBELL, + client->ctx_index + }; - return guc_send(guc, data, 2); + return intel_guc_send(guc, action, ARRAY_SIZE(action)); } /* @@ -297,7 +186,7 @@ select_doorbell_register(struct intel_guc *guc, uint32_t priority) * Select, assign and relase doorbell cachelines * * These functions track which doorbell cachelines are in use. - * The data they manipulate is protected by the guc_send lock. + * The data they manipulate is protected by the intel_guc_send lock. 
*/ static uint32_t select_doorbell_cacheline(struct intel_guc *guc) @@ -1525,7 +1414,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) } guc->execbuf_client = client; - guc_sample_forcewake(guc, client); + intel_guc_sample_forcewake(guc); guc_init_doorbell_hw(guc); /* Take over from manual control of ELSP (execlists) */ @@ -1595,7 +1484,7 @@ int intel_guc_suspend(struct drm_device *dev) /* first page is shared data with GuC */ data[2] = i915_ggtt_offset(ctx->engine[RCS].state); - return guc_send(guc, data, ARRAY_SIZE(data)); + return intel_guc_send(guc, data, ARRAY_SIZE(data)); } @@ -1623,7 +1512,7 @@ int intel_guc_resume(struct drm_device *dev) /* first page is shared data with GuC */ data[2] = i915_ggtt_offset(ctx->engine[RCS].state); - return guc_send(guc, data, ARRAY_SIZE(data)); + return intel_guc_send(guc, data, ARRAY_SIZE(data)); } void i915_guc_capture_logs(struct drm_i915_private *dev_priv) @@ -1634,7 +1523,7 @@ void i915_guc_capture_logs(struct drm_i915_private *dev_priv) * time, so get/put should be really quick. */ intel_runtime_pm_get(dev_priv); - guc_logbuffer_flush_complete(&dev_priv->guc); + intel_guc_log_flush_complete(&dev_priv->guc); intel_runtime_pm_put(dev_priv); } @@ -1652,7 +1541,7 @@ void i915_guc_flush_logs(struct drm_i915_private *dev_priv) flush_work(&dev_priv->guc.log.flush_work); /* Ask GuC to update the log buffer state */ - guc_force_logbuffer_flush(&dev_priv->guc); + intel_guc_log_flush(&dev_priv->guc); /* GuC would have updated log buffer by now, so capture it */ i915_guc_capture_logs(dev_priv); @@ -1693,7 +1582,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) if (!log_param.logging_enabled && (i915.guc_log_level < 0)) return 0; - ret = guc_logging_control(&dev_priv->guc, log_param.value); + ret = intel_guc_log_control(&dev_priv->guc, log_param.value); if (ret < 0) { DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret); return ret; diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c new file mode 100644 index 000000000000..a24cf0822c3b --- /dev/null +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -0,0 +1,135 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include "i915_drv.h" +#include "intel_uc.h" + +/* + * Read GuC command/status register (SOFT_SCRATCH_0) + * Return true if it contains a response rather than a command + */ +bool intel_guc_recv(struct drm_i915_private *dev_priv, u32 *status) +{ + u32 val = I915_READ(SOFT_SCRATCH(0)); + *status = val; + return INTEL_GUC_RECV_IS_RESPONSE(val); +} + +int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + u32 status; + int i; + int ret; + + if (WARN_ON(len < 1 || len > 15)) + return -EINVAL; + + mutex_lock(&guc->send_mutex); + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + + dev_priv->guc.action_count += 1; + dev_priv->guc.action_cmd = action[0]; + + for (i = 0; i < len; i++) + I915_WRITE(SOFT_SCRATCH(i), action[i]); + + POSTING_READ(SOFT_SCRATCH(i - 1)); + + I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER); + + /* + * Fast commands should complete in less than 10us, so sample quickly + * up to that length of time, then switch to a slower sleep-wait loop. + * No intel_guc_send command should ever take longer than 10ms. + */ + ret = wait_for_us(intel_guc_recv(dev_priv, &status), 10); + if (ret) + ret = wait_for(intel_guc_recv(dev_priv, &status), 10); + if (status != INTEL_GUC_STATUS_SUCCESS) { + /* + * Either the GuC explicitly returned an error (which + * we convert to -EIO here) or no response at all was + * received within the timeout limit (-ETIMEDOUT) + */ + if (ret != -ETIMEDOUT) + ret = -EIO; + + DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;" + " ret=%d status=0x%08X response=0x%08X\n", + action[0], ret, status, I915_READ(SOFT_SCRATCH(15))); + + dev_priv->guc.action_fail += 1; + dev_priv->guc.action_err = ret; + } + dev_priv->guc.action_status = status; + + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + mutex_unlock(&guc->send_mutex); + + return ret; +} + +int intel_guc_sample_forcewake(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + u32 action[2]; + + action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE; + /* WaRsDisableCoarsePowerGating:skl,bxt */ + if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) + action[1] = 0; + else + /* bit 0 and 1 are for Render and Media domain separately */ + action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +int intel_guc_log_flush_complete(struct intel_guc *guc) +{ + u32 action[] = { INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +int intel_guc_log_flush(struct intel_guc *guc) +{ + u32 action[] = { + INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH, + 0 + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +int intel_guc_log_control(struct intel_guc *guc, u32 control_val) +{ + u32 action[] = { + INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING, + control_val + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index 4b4a91e0bfd5..a4dd0e05ba4f 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -168,6 +168,14 @@ struct intel_guc { struct mutex send_mutex; }; +/* intel_uc.c */ +bool intel_guc_recv(struct drm_i915_private *dev_priv, u32 *status); +int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len); +int intel_guc_sample_forcewake(struct intel_guc *guc); +int intel_guc_log_flush_complete(struct intel_guc *guc); +int 
intel_guc_log_flush(struct intel_guc *guc); +int intel_guc_log_control(struct intel_guc *guc, u32 control_val); + /* intel_guc_loader.c */ extern void intel_guc_init(struct drm_device *dev); extern int intel_guc_setup(struct drm_device *dev); -- cgit v1.2.3 From 413e8fdb6ae7ef074cb0393adada2658217ec48d Mon Sep 17 00:00:00 2001 From: Arkadiusz Hiler Date: Fri, 25 Nov 2016 18:59:36 +0100 Subject: drm/i915/guc: Init send_mutex in intel_uc_init_early() send_mutex is used to serialise communication with GuC via intel_guc_send(). Since functions that utilize it are no longer limited to submission, initialization should be handled as a part of general setup. v2: move initialization to *_early() Cc: Chris Wilson Cc: Michal Winiarski Signed-off-by: Arkadiusz Hiler Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1480096777-12573-5-git-send-email-arkadiusz.hiler@intel.com Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 2 ++ drivers/gpu/drm/i915/i915_guc_submission.c | 1 - drivers/gpu/drm/i915/intel_uc.c | 5 +++++ drivers/gpu/drm/i915/intel_uc.h | 1 + 4 files changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0fba4bb5655e..8dac298461c0 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -817,6 +817,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, mutex_init(&dev_priv->wm.wm_mutex); mutex_init(&dev_priv->pps_mutex); + intel_uc_init_early(dev_priv); + i915_memcpy_init_early(dev_priv); ret = i915_workqueues_init(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 72fb525b9a8b..125648253902 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -1388,7 +1388,6 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv) guc->ctx_pool_vma = vma; ida_init(&guc->ctx_ids); - mutex_init(&guc->send_mutex); guc_log_create(guc); guc_addon_create(guc); diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index a24cf0822c3b..8ae67954563c 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -25,6 +25,11 @@ #include "i915_drv.h" #include "intel_uc.h" +void intel_uc_init_early(struct drm_i915_private *dev_priv) +{ + mutex_init(&dev_priv->guc.send_mutex); +} + /* * Read GuC command/status register (SOFT_SCRATCH_0) * Return true if it contains a response rather than a command diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index a4dd0e05ba4f..de2b314cb1d7 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -169,6 +169,7 @@ struct intel_guc { }; /* intel_uc.c */ +void intel_uc_init_early(struct drm_i915_private *dev_priv); bool intel_guc_recv(struct drm_i915_private *dev_priv, u32 *status); int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len); int intel_guc_sample_forcewake(struct intel_guc *guc); -- cgit v1.2.3 From 8d6faaf65e8f20be8b8c8af8ca50e88769a1d193 Mon Sep 17 00:00:00 2001 From: Arkadiusz Hiler Date: Fri, 25 Nov 2016 18:59:37 +0100 Subject: drm/i915/guc: Remove spurious include Cc: Chris Wilson Cc: Michal Winiarski Signed-off-by: Arkadiusz Hiler Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1480096777-12573-6-git-send-email-arkadiusz.hiler@intel.com Signed-off-by: Chris Wilson --- 
drivers/gpu/drm/i915/i915_guc_submission.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 125648253902..f724a30a3232 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -21,7 +21,6 @@ * IN THE SOFTWARE. * */ -#include #include #include #include -- cgit v1.2.3 From 92117f0bce64268b841261774e45462cc7ff80af Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 28 Nov 2016 14:36:48 +0000 Subject: drm/i915: Move priority bumping for flips earlier David found another issue with priority bumping from mmioflips, where we are accessing the requests concurrently to them being retired and freed. Whilst we are skipping the dependency if has been submitted, that is not sufficient to stop the dependency from disappearing if another thread retires that request. To prevent we can either employ the struct_mutex (or a request mutex in the future) to serialise retiring before it is freed. Alternatively, we need to keep the dependencies alive using RCU whilst they are being accessed via the DFS. [ 1746.698111] general protection fault: 0000 [#1] PREEMPT SMP [ 1746.698305] Modules linked in: snd_hda_intel snd_hda_codec snd_hwdep x86_pkg_temp_thermal snd_hda_core coretemp crct10dif_pclmul crc32_pclmul snd_pcm ghash_clmulni_intel mei_me mei i915 e1000e ptp pps_core i2c_hid [ 1746.698750] CPU: 1 PID: 6716 Comm: kworker/u8:2 Not tainted 4.9.0-rc6-CI-Nightly_816+ #1 [ 1746.698871] Hardware name: GIGABYTE GB-BKi7A-7500/MFLP7AP-00, BIOS F1 07/27/2016 [ 1746.699125] Workqueue: events_unbound intel_mmio_flip_work_func [i915] [ 1746.699266] task: ffff880260a5e800 task.stack: ffffc90000f6c000 [ 1746.699361] RIP: 0010:[] [] execlists_schedule+0x8d/0x300 [i915] [ 1746.699632] RSP: 0018:ffffc90000f6fcd8 EFLAGS: 00010206 [ 1746.699724] RAX: dead0000000000f8 RBX: ffff8801f64b2bf0 RCX: ffff8801f64b2c10 [ 1746.699842] RDX: dead000000000100 RSI: 0000000000000000 RDI: ffff8801f64b0458 [ 1746.699972] RBP: ffffc90000f6fd68 R08: ffff88026488dc00 R09: 0000000000000002 [ 1746.700090] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000400 [ 1746.700195] R13: ffffc90000f6fcf0 R14: ffff88020955aa40 R15: ffff88020955aa68 [ 1746.700307] FS: 0000000000000000(0000) GS:ffff88026dc80000(0000) knlGS:0000000000000000 [ 1746.700435] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 1746.700532] CR2: 0000000002a69e90 CR3: 0000000002c07000 CR4: 00000000003406e0 [ 1746.700635] Stack: [ 1746.700682] ffff880260a5e880 ffffc90000f6fd50 ffffffff810af69a ffffc90000f6fd28 [ 1746.700827] ffff88020955a628 ffff8801e1eaebf0 0000000000000020 0000000000000000 [ 1746.700947] 00000196af1edc96 ffff88025dfa4000 ffff8801f0b030a8 ffffc90000f6fcf0 [ 1746.701071] Call Trace: [ 1746.701117] [] ? dequeue_entity+0x25a/0xb50 [ 1746.701260] [] fence_set_priority+0x7e/0x80 [i915] [ 1746.701406] [] i915_gem_object_wait_priority+0x85/0x160 [i915] [ 1746.701599] [] intel_mmio_flip_work_func+0x47/0x2b0 [i915] [ 1746.701717] [] process_one_work+0x14d/0x470 [ 1746.701809] [] worker_thread+0x43/0x4e0 [ 1746.701888] [] ? process_one_work+0x470/0x470 [ 1746.701969] [] ? process_one_work+0x470/0x470 [ 1746.702072] [] kthread+0xc5/0xe0 [ 1746.702152] [] ? _raw_spin_unlock_irq+0x9/0x10 [ 1746.702234] [] ? 
kthread_park+0x60/0x60 [ 1746.702318] [] ret_from_fork+0x22/0x30 [ 1746.702387] Code: 89 42 08 48 8b 45 88 48 89 55 c0 4c 89 6d c8 4c 8d 70 d8 4d 8d 7e 28 4d 39 ef 74 72 49 8b 1e 48 8b 13 48 39 d3 48 8d 42 f8 74 3e <48> 8b 10 8b 52 38 41 39 d4 7e 26 48 8b 50 30 48 8b 78 28 48 8d [ 1746.702921] RIP [] execlists_schedule+0x8d/0x300 [i915] Nov 25 21:42:54 kbl-gbbki7 kernel: [ 1746.703027] RSP Fixes: 27745e829a5c ("drm/i915/execlists: Use a local lock for dfs_link access") Fixes: 9a151987d709 ("drm/i915: Add execution priority boosting for mmioflips") Signed-off-by: Chris Wilson Cc: David Weinehall Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161128143649.4289-1-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 972c7e4ebf03..7eb2399d9b40 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12022,7 +12022,6 @@ static void intel_mmio_flip_work_func(struct work_struct *w) to_intel_framebuffer(crtc->base.primary->fb); struct drm_i915_gem_object *obj = intel_fb->obj; - i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY); WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0); intel_pipe_update_start(crtc); @@ -12278,6 +12277,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, i915_add_request_no_flush(request); } + i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY); i915_gem_track_fb(intel_fb_obj(old_fb), obj, to_intel_plane(primary)->frontbuffer_bit); mutex_unlock(&dev->struct_mutex); -- cgit v1.2.3 From 70cd14761d8aeb5db19a410a0a85155e88bb8ad0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 28 Nov 2016 14:36:49 +0000 Subject: Revert "drm/i915/execlists: Use a local lock for dfs_link access" This reverts commit 27745e829a5c ("drm/i915/execlists: Use a local lock for dfs_link access") as the struct_mutex was required to prevent concurrent retiring and freeing, now restored in the previous patch. 
Signed-off-by: Chris Wilson Cc: David Weinehall Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161128143649.4289-2-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/intel_lrc.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 0a09024d6ca3..b2c0d509e191 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -695,7 +695,6 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) static void execlists_schedule(struct drm_i915_gem_request *request, int prio) { - static DEFINE_MUTEX(lock); struct intel_engine_cs *engine = NULL; struct i915_dependency *dep, *p; struct i915_dependency stack; @@ -704,8 +703,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) if (prio <= READ_ONCE(request->priotree.priority)) return; - /* Need global lock to use the temporary link inside i915_dependency */ - mutex_lock(&lock); + /* Need BKL in order to use the temporary link inside i915_dependency */ + lockdep_assert_held(&request->i915->drm.struct_mutex); stack.signaler = &request->priotree; list_add(&stack.dfs_link, &dfs); @@ -772,8 +771,6 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) if (engine) spin_unlock_irq(&engine->timeline->lock); - mutex_unlock(&lock); - /* XXX Do we need to preempt to make room for us and our deps? */ } -- cgit v1.2.3 From ddbb271aea87fc6004d3c8bcdb0710e980c7ec85 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 28 Nov 2016 10:36:48 +0000 Subject: drm/i915: drop the struct_mutex when wedged or trying to reset We grab the struct_mutex in intel_crtc_page_flip, but if we are wedged or a reset is in progress we bail early but never seem to actually release the lock. Fixes: 7f1847ebf48b ("drm/i915: Simplify checking of GPU reset_counter in display pageflips") Cc: Chris Wilson Signed-off-by: Matthew Auld Link: http://patchwork.freedesktop.org/patch/msgid/20161128103648.9235-1-matthew.auld@intel.com Reviewed-by: Joonas Lahtinen Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Cc: # v4.7+ --- drivers/gpu/drm/i915/intel_display.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 7eb2399d9b40..04636552e98a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12206,7 +12206,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error); if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) { ret = -EIO; - goto cleanup; + goto unlock; } atomic_inc(&intel_crtc->unpin_work_count); @@ -12295,6 +12295,7 @@ cleanup_unpin: intel_unpin_fb_obj(fb, crtc->primary->state->rotation); cleanup_pending: atomic_dec(&intel_crtc->unpin_work_count); +unlock: mutex_unlock(&dev->struct_mutex); cleanup: crtc->primary->fb = old_fb; -- cgit v1.2.3 From a18dbba8f01ffae229d00517994f84e7bfad4c49 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Tue, 29 Nov 2016 14:55:16 +0800 Subject: drm/i915: Move the release of PT page to the upper caller MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit a PT page will be released if it doesn't contain any meaningful mappings during PPGTT page table shrinking. The PT entry in the upper level will be set to a scratch entry. 
Normally this works nicely, but in virtualization world, the PPGTT page table is tracked by hypervisor. Releasing the PT page before modifying the upper level PT entry would cause extra efforts. As the tracked page has been returned to OS before losing track from hypervisor, it could be written in any pattern. Hypervisor has to recognize if a page is still being used as a PT page by validating these writing patterns. It's complicated. Better let the guest modify the PT entry in upper level PT first, then release the PT page. Reviewed-by: Chris Wilson Reviewed-by: Michał Winiarski Cc: Michał Winiarski Cc: Michel Thierry Cc: Joonas Lahtinen Cc: Chris Wilson Cc: Zhenyu Wang Cc: Zhiyuan Lv Signed-off-by: Zhi Wang Link: https://patchwork.freedesktop.org/patch/122697/msgid/1479728666-25333-1-git-send-email-zhi.a.wang@intel.com Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1480402516-22275-1-git-send-email-zhi.a.wang@intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index b4bde1452f2a..6cee70745fea 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -736,10 +736,8 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, bitmap_clear(pt->used_ptes, pte, num_entries); - if (bitmap_empty(pt->used_ptes, GEN8_PTES)) { - free_pt(to_i915(vm->dev), pt); + if (bitmap_empty(pt->used_ptes, GEN8_PTES)) return true; - } pt_vaddr = kmap_px(pt); @@ -775,13 +773,12 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, pde_vaddr = kmap_px(pd); pde_vaddr[pde] = scratch_pde; kunmap_px(ppgtt, pde_vaddr); + free_pt(to_i915(vm->dev), pt); } } - if (bitmap_empty(pd->used_pdes, I915_PDES)) { - free_pd(to_i915(vm->dev), pd); + if (bitmap_empty(pd->used_pdes, I915_PDES)) return true; - } return false; } @@ -795,7 +792,6 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, uint64_t length) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - struct drm_i915_private *dev_priv = to_i915(vm->dev); struct i915_page_directory *pd; uint64_t pdpe; gen8_ppgtt_pdpe_t *pdpe_vaddr; @@ -813,16 +809,14 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, pdpe_vaddr[pdpe] = scratch_pdpe; kunmap_px(ppgtt, pdpe_vaddr); } + free_pd(to_i915(vm->dev), pd); } } mark_tlbs_dirty(ppgtt); - if (USES_FULL_48BIT_PPGTT(dev_priv) && - bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv))) { - free_pdp(dev_priv, pdp); + if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv))) return true; - } return false; } @@ -836,6 +830,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm, uint64_t start, uint64_t length) { + struct drm_i915_private *dev_priv = to_i915(vm->dev); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_page_directory_pointer *pdp; uint64_t pml4e; @@ -854,6 +849,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm, pml4e_vaddr = kmap_px(pml4); pml4e_vaddr[pml4e] = scratch_pml4e; kunmap_px(ppgtt, pml4e_vaddr); + free_pdp(dev_priv, pdp); } } } -- cgit v1.2.3 From 3aaa8aba9f414a24106015fa4b5fb099755af033 Mon Sep 17 00:00:00 2001 From: Jérémy Lefaure Date: Mon, 28 Nov 2016 18:43:19 -0500 Subject: drm/i915: fix compilation warnings on maybe uninitialized pointers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two warnings are produced by 
gcc (tested with gcc 6.2.1): drivers/gpu/drm/i915/intel_csr.c: In function ‘csr_load_work_fn’: drivers/gpu/drm/i915/intel_csr.c:400:5: error: ‘fw’ is used uninitialized in this function [-Werror=uninitialized] if (fw) ^ and In file included from drivers/gpu/drm/i915/i915_drv.h:47:0, from drivers/gpu/drm/i915/intel_guc_loader.c:30: drivers/gpu/drm/i915/intel_guc_loader.c: In function ‘intel_guc_init’: ./include/drm/drmP.h:228:2: error: ‘fw’ may be used uninitialized in this function -Werror=maybe-uninitialized] drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) ^~~~~~~~~~ drivers/gpu/drm/i915/intel_guc_loader.c:595:25: note: ‘fw’ was declared here const struct firmware *fw; ^~ When CONFIG_DRM_I915_WERROR is set, those warnings break the build. Initializing fw pointer to NULL in both cases removes the warnings. Signed-off-by: Jérémy Lefaure Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161128234319.20800-1-jeremy.lefaure@lse.epita.fr --- drivers/gpu/drm/i915/intel_csr.c | 2 +- drivers/gpu/drm/i915/intel_guc_loader.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 10e9abcf0932..9cbb8d8363b4 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -389,7 +389,7 @@ static void csr_load_work_fn(struct work_struct *work) { struct drm_i915_private *dev_priv; struct intel_csr *csr; - const struct firmware *fw; + const struct firmware *fw = NULL; int ret; dev_priv = container_of(work, typeof(*dev_priv), csr.work); diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 27f6fd57eb79..a330fa499384 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -592,7 +592,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) { struct pci_dev *pdev = dev->pdev; struct drm_i915_gem_object *obj; - const struct firmware *fw; + const struct firmware *fw = NULL; struct guc_css_header *css; size_t size; int err; -- cgit v1.2.3 From 49d73912cbfcaa3eba109a44ee71200c12fa27ef Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 29 Nov 2016 09:50:08 +0000 Subject: drm/i915: Convert vm->dev backpointer to vm->i915 99% of the time we access i915_address_space->dev we want the i915 device and not the drm device, so let's store the drm_i915_private backpointer instead. The only real complication here are the inlines in i915_vma.h where drm_i915_private is not yet defined and so we have to choose an alternate path for our asserts. 
Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161129095008.32622-1-chris@chris-wilson.co.uk Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_gem_evict.c | 12 ++--- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 8 +-- drivers/gpu/drm/i915/i915_gem_gtt.c | 87 +++++++++++++++---------------- drivers/gpu/drm/i915/i915_gem_gtt.h | 2 +- drivers/gpu/drm/i915/i915_gem_tiling.c | 2 +- drivers/gpu/drm/i915/i915_vma.c | 10 ++-- drivers/gpu/drm/i915/i915_vma.h | 6 +-- drivers/gpu/drm/i915/intel_display.c | 5 +- drivers/gpu/drm/i915/intel_drv.h | 2 +- 10 files changed, 67 insertions(+), 69 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3d4e07e9734f..8ebefb6f6cf2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3532,7 +3532,7 @@ err_unpin_display: void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) { - lockdep_assert_held(&vma->vm->dev->struct_mutex); + lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); if (WARN_ON(vma->obj->pin_display == 0)) return; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index bd08814b015c..739ede7c89ed 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -96,7 +96,7 @@ i915_gem_evict_something(struct i915_address_space *vm, u64 start, u64 end, unsigned flags) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; struct list_head eviction_list; struct list_head *phases[] = { &vm->inactive_list, @@ -106,7 +106,7 @@ i915_gem_evict_something(struct i915_address_space *vm, struct i915_vma *vma, *next; int ret; - lockdep_assert_held(&vm->dev->struct_mutex); + lockdep_assert_held(&vm->i915->drm.struct_mutex); trace_i915_gem_evict(vm, min_size, alignment, flags); /* @@ -162,7 +162,7 @@ search_again: * back to userspace to give our workqueues time to * acquire our locks and unpin the old scanouts. */ - return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC; + return intel_has_pending_fb_unpin(dev_priv) ? 
-EAGAIN : -ENOSPC; } /* Not everything in the GGTT is tracked via vma (otherwise we @@ -217,7 +217,7 @@ i915_gem_evict_for_vma(struct i915_vma *target) { struct drm_mm_node *node, *next; - lockdep_assert_held(&target->vm->dev->struct_mutex); + lockdep_assert_held(&target->vm->i915->drm.struct_mutex); list_for_each_entry_safe(node, next, &target->vm->mm.head_node.node_list, @@ -272,11 +272,11 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) struct i915_vma *vma, *next; int ret; - lockdep_assert_held(&vm->dev->struct_mutex); + lockdep_assert_held(&vm->i915->drm.struct_mutex); trace_i915_gem_evict_vm(vm); if (do_idle) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; if (i915_is_ggtt(vm)) { ret = i915_gem_switch_to_kernel_context(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 0efa3571afc3..d006bcb69e91 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -290,7 +290,7 @@ i915_vma_put_fence(struct i915_vma *vma) { struct drm_i915_fence_reg *fence = vma->fence; - assert_rpm_wakelock_held(to_i915(vma->vm->dev)); + assert_rpm_wakelock_held(vma->vm->i915); if (!fence) return 0; @@ -313,7 +313,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv) } /* Wait for completion of pending flips which consume fences */ - if (intel_has_pending_fb_unpin(&dev_priv->drm)) + if (intel_has_pending_fb_unpin(dev_priv)) return ERR_PTR(-EAGAIN); return ERR_PTR(-EDEADLK); @@ -346,7 +346,7 @@ i915_vma_get_fence(struct i915_vma *vma) /* Note that we revoke fences on runtime suspend. Therefore the user * must keep the device awake whilst using the fence. */ - assert_rpm_wakelock_held(to_i915(vma->vm->dev)); + assert_rpm_wakelock_held(vma->vm->i915); /* Just update our place in the LRU if our fence is getting reused. 
*/ if (vma->fence) { @@ -357,7 +357,7 @@ i915_vma_get_fence(struct i915_vma *vma) return 0; } } else if (set) { - fence = fence_find(to_i915(vma->vm->dev)); + fence = fence_find(vma->vm->i915); if (IS_ERR(fence)) return PTR_ERR(fence); } else diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 6cee70745fea..18d0340e2be4 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -380,7 +380,7 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr) #define kmap_px(px) kmap_page_dma(px_base(px)) #define kunmap_px(ppgtt, vaddr) \ - kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr)) + kunmap_page_dma((ppgtt)->base.i915, (vaddr)) #define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px)) #define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px)) @@ -470,7 +470,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm, scratch_pte = gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); - fill_px(to_i915(vm->dev), pt, scratch_pte); + fill_px(vm->i915, pt, scratch_pte); } static void gen6_initialize_pt(struct i915_address_space *vm, @@ -483,7 +483,7 @@ static void gen6_initialize_pt(struct i915_address_space *vm, scratch_pte = vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); - fill32_px(to_i915(vm->dev), pt, scratch_pte); + fill32_px(vm->i915, pt, scratch_pte); } static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv) @@ -531,7 +531,7 @@ static void gen8_initialize_pd(struct i915_address_space *vm, scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC); - fill_px(to_i915(vm->dev), pd, scratch_pde); + fill_px(vm->i915, pd, scratch_pde); } static int __pdp_init(struct drm_i915_private *dev_priv, @@ -612,7 +612,7 @@ static void gen8_initialize_pdp(struct i915_address_space *vm, scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC); - fill_px(to_i915(vm->dev), pdp, scratch_pdpe); + fill_px(vm->i915, pdp, scratch_pdpe); } static void gen8_initialize_pml4(struct i915_address_space *vm, @@ -623,7 +623,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm, scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC); - fill_px(to_i915(vm->dev), pml4, scratch_pml4e); + fill_px(vm->i915, pml4, scratch_pml4e); } static void @@ -710,7 +710,7 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt, */ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) { - ppgtt->pd_dirty_rings = INTEL_INFO(to_i915(ppgtt->base.dev))->ring_mask; + ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask; } /* Removes entries from a single page table, releasing it if it's empty. 
@@ -773,7 +773,7 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, pde_vaddr = kmap_px(pd); pde_vaddr[pde] = scratch_pde; kunmap_px(ppgtt, pde_vaddr); - free_pt(to_i915(vm->dev), pt); + free_pt(vm->i915, pt); } } @@ -809,7 +809,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, pdpe_vaddr[pdpe] = scratch_pdpe; kunmap_px(ppgtt, pdpe_vaddr); } - free_pd(to_i915(vm->dev), pd); + free_pd(vm->i915, pd); } } @@ -830,7 +830,6 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm, uint64_t start, uint64_t length) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_page_directory_pointer *pdp; uint64_t pml4e; @@ -838,7 +837,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm, gen8_ppgtt_pml4e_t scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC); - GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))); + GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->i915)); gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { if (WARN_ON(!pml4->pdps[pml4e])) @@ -849,7 +848,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm, pml4e_vaddr = kmap_px(pml4); pml4e_vaddr[pml4e] = scratch_pml4e; kunmap_px(ppgtt, pml4e_vaddr); - free_pdp(dev_priv, pdp); + free_pdp(vm->i915, pdp); } } } @@ -951,7 +950,7 @@ static void gen8_free_page_tables(struct drm_i915_private *dev_priv, static int gen8_init_scratch(struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; int ret; ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA); @@ -998,7 +997,7 @@ free_scratch_page: static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) { enum vgt_g2v_type msg; - struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); + struct drm_i915_private *dev_priv = ppgtt->base.i915; int i; if (USES_FULL_48BIT_PPGTT(dev_priv)) { @@ -1028,7 +1027,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) static void gen8_free_scratch(struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; if (USES_FULL_48BIT_PPGTT(dev_priv)) free_pdp(dev_priv, vm->scratch_pdp); @@ -1055,7 +1054,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv, static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) { - struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); + struct drm_i915_private *dev_priv = ppgtt->base.i915; int i; for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) { @@ -1070,7 +1069,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); if (intel_vgpu_active(dev_priv)) @@ -1108,7 +1107,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm, uint64_t length, unsigned long *new_pts) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; struct i915_page_table *pt; uint32_t pde; @@ -1169,7 +1168,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm, uint64_t length, unsigned long *new_pds) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; struct i915_page_directory *pd; 
uint32_t pdpe; uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv); @@ -1222,7 +1221,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm, uint64_t length, unsigned long *new_pdps) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; struct i915_page_directory_pointer *pdp; uint32_t pml4e; @@ -1297,7 +1296,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); unsigned long *new_page_dirs, *new_page_tables; - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; struct i915_page_directory *pd; const uint64_t orig_start = start; const uint64_t orig_length = length; @@ -1446,7 +1445,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm, err_out: for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4) - gen8_ppgtt_cleanup_3lvl(to_i915(vm->dev), pml4->pdps[pml4e]); + gen8_ppgtt_cleanup_3lvl(vm->i915, pml4->pdps[pml4e]); return ret; } @@ -1580,7 +1579,7 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt) */ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) { - struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); + struct drm_i915_private *dev_priv = ppgtt->base.i915; int ret; ret = gen8_init_scratch(&ppgtt->base); @@ -1923,7 +1922,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, uint64_t start_in, uint64_t length_in) { DECLARE_BITMAP(new_page_tables, I915_PDES); - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_page_table *pt; @@ -2010,7 +2009,7 @@ unwind_out: static int gen6_init_scratch(struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; int ret; ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA); @@ -2030,7 +2029,7 @@ static int gen6_init_scratch(struct i915_address_space *vm) static void gen6_free_scratch(struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; free_pt(dev_priv, vm->scratch_pt); cleanup_scratch_page(dev_priv, &vm->scratch_page); @@ -2040,7 +2039,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_page_directory *pd = &ppgtt->pd; - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; struct i915_page_table *pt; uint32_t pde; @@ -2056,7 +2055,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) { struct i915_address_space *vm = &ppgtt->base; - struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); + struct drm_i915_private *dev_priv = ppgtt->base.i915; struct i915_ggtt *ggtt = &dev_priv->ggtt; bool retried = false; int ret; @@ -2121,7 +2120,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) { - struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); + struct drm_i915_private *dev_priv = ppgtt->base.i915; struct i915_ggtt *ggtt = &dev_priv->ggtt; int ret; @@ -2172,7 +2171,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt, 
struct drm_i915_private *dev_priv) { - ppgtt->base.dev = &dev_priv->drm; + ppgtt->base.i915 = dev_priv; if (INTEL_INFO(dev_priv)->gen < 8) return gen6_ppgtt_init(ppgtt); @@ -2394,7 +2393,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, enum i915_cache_level level, u32 unused) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; gen8_pte_t __iomem *pte = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + (offset >> PAGE_SHIFT); @@ -2410,7 +2409,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, uint64_t start, enum i915_cache_level level, u32 unused) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); struct sgt_iter sgt_iter; gen8_pte_t __iomem *gtt_entries; @@ -2475,7 +2474,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm, enum i915_cache_level level, u32 flags) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; gen6_pte_t __iomem *pte = (gen6_pte_t __iomem *)dev_priv->ggtt.gsm + (offset >> PAGE_SHIFT); @@ -2497,7 +2496,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, uint64_t start, enum i915_cache_level level, u32 flags) { - struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_i915_private *dev_priv = vm->i915; struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); struct sgt_iter sgt_iter; gen6_pte_t __iomem *gtt_entries; @@ -2617,7 +2616,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { - struct drm_i915_private *i915 = to_i915(vma->vm->dev); + struct drm_i915_private *i915 = vma->vm->i915; struct drm_i915_gem_object *obj = vma->obj; u32 pte_flags = 0; int ret; @@ -2649,7 +2648,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { - struct drm_i915_private *i915 = to_i915(vma->vm->dev); + struct drm_i915_private *i915 = vma->vm->i915; u32 pte_flags; int ret; @@ -2683,7 +2682,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, static void ggtt_unbind_vma(struct i915_vma *vma) { - struct drm_i915_private *i915 = to_i915(vma->vm->dev); + struct drm_i915_private *i915 = vma->vm->i915; struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; const u64 size = min(vma->size, vma->node.size); @@ -2925,8 +2924,8 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl) static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) { - struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev); - struct pci_dev *pdev = ggtt->base.dev->pdev; + struct drm_i915_private *dev_priv = ggtt->base.i915; + struct pci_dev *pdev = dev_priv->drm.pdev; phys_addr_t phys_addr; int ret; @@ -3038,12 +3037,12 @@ static void gen6_gmch_remove(struct i915_address_space *vm) struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); iounmap(ggtt->gsm); - cleanup_scratch_page(to_i915(vm->dev), &vm->scratch_page); + cleanup_scratch_page(vm->i915, &vm->scratch_page); } static int gen8_gmch_probe(struct i915_ggtt *ggtt) { - struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev); + struct drm_i915_private *dev_priv = ggtt->base.i915; struct pci_dev *pdev = dev_priv->drm.pdev; unsigned int size; u16 snb_gmch_ctl; @@ -3092,7 +3091,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) static int gen6_gmch_probe(struct i915_ggtt *ggtt) { - struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev); + struct drm_i915_private *dev_priv = 
ggtt->base.i915; struct pci_dev *pdev = dev_priv->drm.pdev; unsigned int size; u16 snb_gmch_ctl; @@ -3145,7 +3144,7 @@ static void i915_gmch_remove(struct i915_address_space *vm) static int i915_gmch_probe(struct i915_ggtt *ggtt) { - struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev); + struct drm_i915_private *dev_priv = ggtt->base.i915; int ret; ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL); @@ -3180,7 +3179,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) struct i915_ggtt *ggtt = &dev_priv->ggtt; int ret; - ggtt->base.dev = &dev_priv->drm; + ggtt->base.i915 = dev_priv; if (INTEL_GEN(dev_priv) <= 5) ret = i915_gmch_probe(ggtt); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 4f35be4c26c7..8965bbb13db7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -220,7 +220,7 @@ struct i915_pml4 { struct i915_address_space { struct drm_mm mm; struct i915_gem_timeline timeline; - struct drm_device *dev; + struct drm_i915_private *i915; /* Every address space belongs to a struct file - except for the global * GTT that is owned by the driver (and so @file is set to NULL). In * principle, no information should leak from one context to another diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index c85e7b06bdba..62ad375de6ca 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -119,7 +119,7 @@ i915_tiling_ok(struct drm_i915_private *dev_priv, static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode) { - struct drm_i915_private *dev_priv = to_i915(vma->vm->dev); + struct drm_i915_private *dev_priv = vma->vm->i915; u32 size; if (!i915_vma_is_map_and_fenceable(vma)) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index a792dcb902b5..4c91a68ecb6d 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -198,9 +198,9 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) void __iomem *ptr; /* Access through the GTT requires the device to be awake. 
*/ - assert_rpm_wakelock_held(to_i915(vma->vm->dev)); + assert_rpm_wakelock_held(vma->vm->i915); - lockdep_assert_held(&vma->vm->dev->struct_mutex); + lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) return IO_ERR_PTR(-ENODEV); @@ -347,7 +347,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, static int i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) { - struct drm_i915_private *dev_priv = to_i915(vma->vm->dev); + struct drm_i915_private *dev_priv = vma->vm->i915; struct drm_i915_gem_object *obj = vma->obj; u64 start, end; int ret; @@ -469,7 +469,7 @@ int __i915_vma_do_pin(struct i915_vma *vma, unsigned int bound = vma->flags; int ret; - lockdep_assert_held(&vma->vm->dev->struct_mutex); + lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0); GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma)); @@ -567,7 +567,7 @@ int i915_vma_unbind(struct i915_vma *vma) for_each_active(active, idx) { ret = i915_gem_active_retire(&vma->last_read[idx], - &vma->vm->dev->struct_mutex); + &vma->vm->i915->drm.struct_mutex); if (ret) break; } diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 85446f0b0b3f..21be74c61065 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -282,7 +282,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma); */ static inline void i915_vma_unpin_iomap(struct i915_vma *vma) { - lockdep_assert_held(&vma->vm->dev->struct_mutex); + lockdep_assert_held(&vma->obj->base.dev->struct_mutex); GEM_BUG_ON(vma->iomap == NULL); i915_vma_unpin(vma); } @@ -311,7 +311,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma) static inline bool i915_vma_pin_fence(struct i915_vma *vma) { - lockdep_assert_held(&vma->vm->dev->struct_mutex); + lockdep_assert_held(&vma->obj->base.dev->struct_mutex); if (vma->fence) { vma->fence->pin_count++; return true; @@ -330,7 +330,7 @@ i915_vma_pin_fence(struct i915_vma *vma) static inline void i915_vma_unpin_fence(struct i915_vma *vma) { - lockdep_assert_held(&vma->vm->dev->struct_mutex); + lockdep_assert_held(&vma->obj->base.dev->struct_mutex); if (vma->fence) { GEM_BUG_ON(vma->fence->pin_count <= 0); vma->fence->pin_count--; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 04636552e98a..3b9e60aeca51 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4228,9 +4228,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) udelay(100); } -bool intel_has_pending_fb_unpin(struct drm_device *dev) +bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; /* Note that we don't need to be called with mode_config.lock here @@ -4240,7 +4239,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev) * cannot claim and pin a new fb without at least acquring the * struct_mutex and so serialising with us. 
*/ - for_each_intel_crtc(dev, crtc) { + for_each_intel_crtc(&dev_priv->drm, crtc) { if (atomic_read(&crtc->unpin_work_count) == 0) continue; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 2b71567b5292..7f43c2f7d9d6 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1219,7 +1219,7 @@ unsigned int intel_fb_xy_to_linear(int x, int y, void intel_add_fb_offsets(int *x, int *y, const struct intel_plane_state *state, int plane); unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); -bool intel_has_pending_fb_unpin(struct drm_device *dev); +bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv); void intel_mark_busy(struct drm_i915_private *dev_priv); void intel_mark_idle(struct drm_i915_private *dev_priv); void intel_crtc_restore_mode(struct drm_crtc *crtc); -- cgit v1.2.3 From 24603935830b2bb2a8536ea1b68d49a9f82451fe Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 23 Nov 2016 15:07:14 +0000 Subject: drm/i915/perf: Wrap 64bit divides in do_div() Just a couple of naked 64bit divides causing link errors on 32bit builds, with: ERROR: "__udivdi3" [drivers/gpu/drm/i915/i915.ko] undefined! v2: do_div() is only u64/u32, we need a u32/u64! v3: div_u64() == u64/u32, div64_u64() == u64/u64 Reported-by: kbuild test robot Fixes: d79651522e89 ("drm/i915: Enable i915 perf stream for Haswell OA unit") Signed-off-by: Chris Wilson Cc: Robert Bragg Link: http://patchwork.freedesktop.org/patch/msgid/20161123150714.24449-1-chris@chris-wilson.co.uk Reviewed-by: Robert Bragg --- drivers/gpu/drm/i915/i915_perf.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 95512824922b..14de9a4eee27 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -974,8 +974,8 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream) static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent) { - return 1000000000ULL * (2ULL << exponent) / - dev_priv->perf.oa.timestamp_frequency; + return div_u64(1000000000ULL * (2ULL << exponent), + dev_priv->perf.oa.timestamp_frequency); } static const struct i915_perf_stream_ops i915_oa_stream_ops = { @@ -1051,16 +1051,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, dev_priv->perf.oa.periodic = props->oa_periodic; if (dev_priv->perf.oa.periodic) { - u64 period_ns = oa_exponent_to_ns(dev_priv, - props->oa_period_exponent); + u32 tail; dev_priv->perf.oa.period_exponent = props->oa_period_exponent; /* See comment for OA_TAIL_MARGIN_NSEC for details * about this tail_margin... 
*/ - dev_priv->perf.oa.tail_margin = - ((OA_TAIL_MARGIN_NSEC / period_ns) + 1) * format_size; + tail = div64_u64(OA_TAIL_MARGIN_NSEC, + oa_exponent_to_ns(dev_priv, + props->oa_period_exponent)); + dev_priv->perf.oa.tail_margin = (tail + 1) * format_size; } if (stream->ctx) { -- cgit v1.2.3 From c6385c947f4d1526d823a16ea25daa93d2897997 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 29 Nov 2016 12:42:05 +0000 Subject: drm/i915: Fix tracepoint compilation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/gpu/drm/i915/./i915_trace.h: In function ‘trace_event_raw_event_i915_gem_evict’: drivers/gpu/drm/i915/./i915_trace.h:409:24: error: ‘struct i915_address_space’ has no member named ‘dev’ __entry->dev = vm->dev->primary->index; A couple of macros missed in the s/vm->dev/vm->i915/ conversion. Fixes: 49d73912cbfc ("drm/i915: Convert vm->dev backpointer to vm->i915") Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161129124205.19351-1-chris@chris-wilson.co.uk Reviewed-by: Michał Winiarski --- drivers/gpu/drm/i915/i915_gem_gtt.c | 10 +++++----- drivers/gpu/drm/i915/i915_trace.h | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 18d0340e2be4..02fb063302bf 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -858,7 +858,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) + if (USES_FULL_48BIT_PPGTT(vm->i915)) gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length); else gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length); @@ -893,7 +893,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, kunmap_px(ppgtt, pt_vaddr); pt_vaddr = NULL; if (++pde == I915_PDES) { - if (++pdpe == I915_PDPES_PER_PDP(to_i915(vm->dev))) + if (++pdpe == I915_PDPES_PER_PDP(vm->i915)) break; pde = 0; } @@ -916,7 +916,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0); - if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) { + if (!USES_FULL_48BIT_PPGTT(vm->i915)) { gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start, cache_level); } else { @@ -1455,7 +1455,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) + if (USES_FULL_48BIT_PPGTT(vm->i915)) return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length); else return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length); @@ -1526,7 +1526,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); - if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) { + if (!USES_FULL_48BIT_PPGTT(vm->i915)) { gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m); } else { uint64_t pml4e; diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index c5d210ebaa9a..240067705e1c 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -406,7 +406,7 @@ TRACE_EVENT(i915_gem_evict, ), TP_fast_assign( - __entry->dev = vm->dev->primary->index; + __entry->dev = vm->i915->drm.primary->index; __entry->vm = vm; 
__entry->size = size; __entry->align = align; @@ -443,7 +443,7 @@ TRACE_EVENT(i915_gem_evict_vm, ), TP_fast_assign( - __entry->dev = vm->dev->primary->index; + __entry->dev = vm->i915->drm.primary->index; __entry->vm = vm; ), @@ -711,7 +711,7 @@ DECLARE_EVENT_CLASS(i915_ppgtt, TP_fast_assign( __entry->vm = vm; - __entry->dev = vm->dev->primary->index; + __entry->dev = vm->i915->drm.primary->index; ), TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm) -- cgit v1.2.3 From 9a148a96fc3a654ddcf142a7ab7db37b972ba5d8 Mon Sep 17 00:00:00 2001 From: Libin Yang Date: Mon, 28 Nov 2016 20:07:05 +0800 Subject: drm/i915/debugfs: add dp mst info Add the DP MST info dump in debugfs. Signed-off-by: Dhinakaran Pandiyan Signed-off-by: Libin Yang Reviewed-by: Lyude Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/1480334827-112273-1-git-send-email-libin.yang@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4a431d9c700c..db53fd09f8df 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2877,6 +2877,20 @@ static void intel_dp_info(struct seq_file *m, &intel_dp->aux); } +static void intel_dp_mst_info(struct seq_file *m, + struct intel_connector *intel_connector) +{ + struct intel_encoder *intel_encoder = intel_connector->encoder; + struct intel_dp_mst_encoder *intel_mst = + enc_to_mst(&intel_encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, + intel_connector->port); + + seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); +} + static void intel_hdmi_info(struct seq_file *m, struct intel_connector *intel_connector) { @@ -2919,7 +2933,10 @@ static void intel_connector_info(struct seq_file *m, switch (connector->connector_type) { case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_eDP: - intel_dp_info(m, intel_connector); + if (intel_encoder->type == INTEL_OUTPUT_DP_MST) + intel_dp_mst_info(m, intel_connector); + else + intel_dp_info(m, intel_connector); break; case DRM_MODE_CONNECTOR_LVDS: if (intel_encoder->type == INTEL_OUTPUT_LVDS) -- cgit v1.2.3 From 9935f7fa2854355203e3976762eecfb218079aac Mon Sep 17 00:00:00 2001 From: Libin Yang Date: Mon, 28 Nov 2016 20:07:06 +0800 Subject: drm/i915: abstract ddi being audio enabled Prepare for using the same code for judging ddi being audio enabled. No functional changes. 
Signed-off-by: Dhinakaran Pandiyan Signed-off-by: Libin Yang Reviewed-by: Lyude Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/1480334827-112273-2-git-send-email-libin.yang@intel.com --- drivers/gpu/drm/i915/intel_ddi.c | 20 +++++++++++++++----- drivers/gpu/drm/i915/intel_drv.h | 2 ++ 2 files changed, 17 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index f8e939d17160..8b47efa37e47 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1950,6 +1950,19 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) udelay(600); } +bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, + struct intel_crtc *intel_crtc) +{ + u32 temp; + + if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { + temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); + if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe)) + return true; + } + return false; +} + void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ -2015,11 +2028,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder, break; } - if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { - temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); - if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe)) - pipe_config->has_audio = true; - } + pipe_config->has_audio = + intel_ddi_is_audio_enabled(dev_priv, intel_crtc); if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp && pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 7f43c2f7d9d6..460f3742ebd7 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1175,6 +1175,8 @@ bool intel_ddi_pll_select(struct intel_crtc *crtc, void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); +bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, + struct intel_crtc *intel_crtc); void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config); struct intel_encoder * -- cgit v1.2.3 From 7f9e77545b92bcb894b8e2be5646535e8ba8da9e Mon Sep 17 00:00:00 2001 From: Libin Yang Date: Mon, 28 Nov 2016 20:07:07 +0800 Subject: drm/i915: enable dp mst audio This patch adds support for DP MST audio in i915. Enable audio codec when DP MST is enabled if has_audio flag is set. Disable audio codec when DP MST is disabled if has_audio flag is set. Another separated patches to support DP MST audio will be implemented in audio driver. This patch is ported from commit 3708d5e082c3 ("drm/i915: start adding dp mst audio") And because commit 3708d5e082c3 ("drm/i915: start adding dp mst audio") breaks MST multi-monitor setups on some platforms, the orignal patch is reverted by commit be754b101f70 ("Revert "drm/i915: start adding dp mst audio"") As the multi-monitor setups issue is fixed, let's port the patch and enable the dp mst audio. 
Signed-off-by: Libin Yang Cc: Lyude Cc: Jani Nikula Cc: Rodrigo Vivi Cc: Dhinakaran Pandiyan Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/1480334827-112273-3-git-send-email-libin.yang@intel.com --- drivers/gpu/drm/i915/intel_dp_mst.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index b029d1026a28..1f98757f61e7 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -37,6 +37,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_connector *connector = + to_intel_connector(conn_state->connector); struct drm_atomic_state *state; int bpp; int lane_count, slots; @@ -58,6 +60,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, state = pipe_config->base.state; + if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, connector->port)) + pipe_config->has_audio = true; mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); pipe_config->pbn = mst_pbn; @@ -83,6 +87,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder, struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int ret; DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); @@ -93,6 +98,10 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder, if (ret) { DRM_ERROR("failed to update payload %d\n", ret); } + if (old_crtc_state->has_audio) { + intel_audio_codec_disable(encoder); + intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO); + } } static void intel_mst_post_disable_dp(struct intel_encoder *encoder, @@ -205,6 +214,10 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, ret = drm_dp_check_act_status(&intel_dp->mst_mgr); ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr); + if (pipe_config->has_audio) { + intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); + intel_audio_codec_enable(encoder, pipe_config, conn_state); + } } static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, @@ -227,6 +240,9 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; u32 temp, flags = 0; + pipe_config->has_audio = + intel_ddi_is_audio_enabled(dev_priv, crtc); + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (temp & TRANS_DDI_PHSYNC) flags |= DRM_MODE_FLAG_PHSYNC; -- cgit v1.2.3 From 334636c67e86afff9334b64e942bad80bbdf869f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 29 Nov 2016 12:10:20 +0000 Subject: drm/i915: Trim i915_guc_info() stack usage i915_guc_info() (part of debugfs output) tries to avoid holding struct_mutex for a long period by copying onto the stack. This causes a warning that the stack frame is massive, so stop doing that. We can even forgo holding the struct_mutex here as that doesn't serialise the values being read (and the lists used exist for the device lifetime). v2: Skip printing anything if guc->execbuf_client is disabled (avoids potential NULL dereference). 
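As an aside to make the stack-usage argument concrete: the underlying trade-off is the generic C one between copying a large structure onto the stack and reading it through a const pointer. A minimal userspace sketch, assuming invented stand-in names (guc_state and its fields are not the driver's types):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for a large driver state block; the real GuC state is larger,
 * which is what made the on-stack copy in debugfs problematic. */
struct guc_state {
	uint64_t action_count;
	uint32_t action_fail;
	uint64_t submissions[4];
	void *execbuf_client;		/* NULL when submission is not set up */
};

/* Copying costs sizeof(struct guc_state) bytes of stack per call --
 * the pattern being removed. */
static void dump_by_value(struct guc_state snapshot)
{
	printf("actions: %llu (failed %u)\n",
	       (unsigned long long)snapshot.action_count,
	       snapshot.action_fail);
}

/* Reading through a const pointer needs only pointer-sized stack space and
 * still guarantees the dumper cannot modify the state. */
static void dump_by_reference(const struct guc_state *state)
{
	if (!state->execbuf_client) {
		printf("GuC submission disabled\n");
		return;
	}
	printf("actions: %llu (failed %u)\n",
	       (unsigned long long)state->action_count,
	       state->action_fail);
}

int main(void)
{
	struct guc_state state = { .action_count = 42, .action_fail = 1 };

	dump_by_value(state);
	dump_by_reference(&state);	/* execbuf_client is NULL here, so
					 * this takes the "disabled" branch */
	return 0;
}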
Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20161129121024.22650-2-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_debugfs.c | 46 ++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index db53fd09f8df..144d8066eb59 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2434,47 +2434,41 @@ static void i915_guc_client_info(struct seq_file *m, static int i915_guc_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct intel_guc guc; - struct i915_guc_client client = {}; + const struct intel_guc *guc = &dev_priv->guc; struct intel_engine_cs *engine; enum intel_engine_id id; - u64 total = 0; - - if (!HAS_GUC_SCHED(dev_priv)) - return 0; + u64 total; - if (mutex_lock_interruptible(&dev->struct_mutex)) + if (!guc->execbuf_client) { + seq_printf(m, "GuC submission %s\n", + HAS_GUC_SCHED(dev_priv) ? + "disabled" : + "not supported"); return 0; - - /* Take a local copy of the GuC data, so we can dump it at leisure */ - guc = dev_priv->guc; - if (guc.execbuf_client) - client = *guc.execbuf_client; - - mutex_unlock(&dev->struct_mutex); + } seq_printf(m, "Doorbell map:\n"); - seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap); - seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline); + seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc->doorbell_bitmap); + seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline); - seq_printf(m, "GuC total action count: %llu\n", guc.action_count); - seq_printf(m, "GuC action failure count: %u\n", guc.action_fail); - seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd); - seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status); - seq_printf(m, "GuC last action error code: %d\n", guc.action_err); + seq_printf(m, "GuC total action count: %llu\n", guc->action_count); + seq_printf(m, "GuC action failure count: %u\n", guc->action_fail); + seq_printf(m, "GuC last action command: 0x%x\n", guc->action_cmd); + seq_printf(m, "GuC last action status: 0x%x\n", guc->action_status); + seq_printf(m, "GuC last action error code: %d\n", guc->action_err); + total = 0; seq_printf(m, "\nGuC submissions:\n"); for_each_engine(engine, dev_priv, id) { - u64 submissions = guc.submissions[id]; + u64 submissions = guc->submissions[id]; total += submissions; seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", - engine->name, submissions, guc.last_seqno[id]); + engine->name, submissions, guc->last_seqno[id]); } seq_printf(m, "\t%s: %llu\n", "Total", total); - seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client); - i915_guc_client_info(m, dev_priv, &client); + seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client); + i915_guc_client_info(m, dev_priv, guc->execbuf_client); i915_guc_log_info(m, dev_priv); -- cgit v1.2.3 From 357248bfe592f8a34be5fb76bb8154710403785e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 29 Nov 2016 12:10:21 +0000 Subject: drm/i915/guc: Rename client->cookie to match use The client->cookie is a shadow of the doorbell->cookie value, so rename it to indicate its association with the doorbell, like the doorbell id and offset. 
Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161129121024.22650-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_guc_submission.c | 6 +++--- drivers/gpu/drm/i915/intel_uc.h | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 144d8066eb59..d179eae1b1f6 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2414,7 +2414,7 @@ static void i915_guc_client_info(struct seq_file *m, seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n", client->priority, client->ctx_index, client->proc_desc_offset); seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n", - client->doorbell_id, client->doorbell_offset, client->cookie); + client->doorbell_id, client->doorbell_offset, client->doorbell_cookie); seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", client->wq_size, client->wq_offset, client->wq_tail); diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index f724a30a3232..aaaf3928394e 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -454,11 +454,11 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) /* current cookie */ db_cmp.db_status = GUC_DOORBELL_ENABLED; - db_cmp.cookie = gc->cookie; + db_cmp.cookie = gc->doorbell_cookie; /* cookie to be updated */ db_exc.db_status = GUC_DOORBELL_ENABLED; - db_exc.cookie = gc->cookie + 1; + db_exc.cookie = gc->doorbell_cookie + 1; if (db_exc.cookie == 0) db_exc.cookie = 1; @@ -473,7 +473,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) /* if the exchange was successfully executed */ if (db_ret.value_qw == db_cmp.value_qw) { /* db was successfully rung */ - gc->cookie = db_exc.cookie; + gc->doorbell_cookie = db_exc.cookie; ret = 0; break; } diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index de2b314cb1d7..8507a8f35b72 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -74,7 +74,7 @@ struct i915_guc_client { uint32_t proc_desc_offset; uint32_t doorbell_offset; - uint32_t cookie; + uint32_t doorbell_cookie; uint16_t doorbell_id; uint16_t padding[3]; /* Maintain alignment */ -- cgit v1.2.3 From 597bdc8bb2e8a8318d9c19bc285a95532a9ffe52 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 29 Nov 2016 12:10:22 +0000 Subject: drm/i915/guc: Initialise doorbell cookie to matching value Set the initial value of the doorbell cookie from the client. 
Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161129121024.22650-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_guc_submission.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index aaaf3928394e..dd879f0b077d 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -134,8 +134,8 @@ static int guc_update_doorbell_id(struct intel_guc *guc, /* Activate the new doorbell */ __set_bit(new_id, doorbell_bitmap); - doorbell->cookie = 0; doorbell->db_status = GUC_DOORBELL_ENABLED; + doorbell->cookie = client->doorbell_cookie; return guc_allocate_doorbell(guc, client); } -- cgit v1.2.3 From 4d357af49918e21dde6ecfb9b2f431fe0ab3eff8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 29 Nov 2016 12:10:23 +0000 Subject: drm/i915/guc: Keep the execbuf client allocated across reset In order to avoid some complexity in trying to reconstruct the workqueues across reset, remember them instead. The issue comes when we have to handle a reset between request allocation and submission, the request has reserved space in the wq, but is not in any list so we fail to restore the reserved space. By keeping the execbuf client intact across the reset, we also keep the reservations. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161129121024.22650-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_guc_submission.c | 84 +++++++++++++++++++----------- 1 file changed, 53 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index dd879f0b077d..b0f858cf2881 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -139,13 +139,6 @@ static int guc_update_doorbell_id(struct intel_guc *guc, return guc_allocate_doorbell(guc, client); } -static int guc_init_doorbell(struct intel_guc *guc, - struct i915_guc_client *client, - uint16_t db_id) -{ - return guc_update_doorbell_id(guc, client, db_id); -} - static void guc_disable_doorbell(struct intel_guc *guc, struct i915_guc_client *client) { @@ -666,8 +659,7 @@ static void guc_init_doorbell_hw(struct intel_guc *guc) uint16_t db_id; int i, err; - /* Save client's original doorbell selection */ - db_id = client->doorbell_id; + guc_disable_doorbell(guc, client); for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { /* Skip if doorbell is OK */ @@ -680,7 +672,9 @@ static void guc_init_doorbell_hw(struct intel_guc *guc) i, err); } - /* Restore to original value */ + db_id = select_doorbell_register(guc, client->priority); + WARN_ON(db_id == GUC_INVALID_DOORBELL_ID); + err = guc_update_doorbell_id(guc, client, db_id); if (err) DRM_WARN("Failed to restore doorbell to %d, err %d\n", @@ -770,8 +764,13 @@ guc_client_alloc(struct drm_i915_private *dev_priv, guc_proc_desc_init(guc, client); guc_ctx_desc_init(guc, client); - if (guc_init_doorbell(guc, client, db_id)) - goto err; + + /* For runtime client allocation we need to enable the doorbell. Not + * required yet for the static execbuf_client as this special kernel + * client is enabled from i915_guc_submission_enable(). 
+ * + * guc_update_doorbell_id(guc, client, db_id); + */ DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n", priority, client, client->engines, client->ctx_index); @@ -1371,6 +1370,9 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv) struct intel_guc *guc = &dev_priv->guc; struct i915_vma *vma; + if (!HAS_GUC_SCHED(dev_priv)) + return 0; + /* Wipe bitmap & delete client in case of reinitialisation */ bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS); i915_guc_submission_disable(dev_priv); @@ -1390,42 +1392,58 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv) guc_log_create(guc); guc_addon_create(guc); + guc->execbuf_client = guc_client_alloc(dev_priv, + INTEL_INFO(dev_priv)->ring_mask, + GUC_CTX_PRIORITY_KMD_NORMAL, + dev_priv->kernel_context); + if (!guc->execbuf_client) { + DRM_ERROR("Failed to create GuC client for execbuf!\n"); + goto err; + } + return 0; + +err: + i915_guc_submission_fini(dev_priv); + return -ENOMEM; +} + +static void guc_reset_wq(struct i915_guc_client *gc) +{ + struct guc_process_desc *desc = gc->vaddr + gc->proc_desc_offset; + + desc->head = 0; + desc->tail = 0; + + gc->wq_tail = 0; } int i915_guc_submission_enable(struct drm_i915_private *dev_priv) { struct intel_guc *guc = &dev_priv->guc; - struct drm_i915_gem_request *request; - struct i915_guc_client *client; + struct i915_guc_client *client = guc->execbuf_client; struct intel_engine_cs *engine; enum intel_engine_id id; - /* client for execbuf submission */ - client = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->ring_mask, - GUC_CTX_PRIORITY_KMD_NORMAL, - dev_priv->kernel_context); - if (!client) { - DRM_ERROR("Failed to create normal GuC client!\n"); - return -ENOMEM; - } + if (!client) + return -ENODEV; - guc->execbuf_client = client; intel_guc_sample_forcewake(guc); + + guc_reset_wq(client); guc_init_doorbell_hw(guc); /* Take over from manual control of ELSP (execlists) */ for_each_engine(engine, dev_priv, id) { + struct drm_i915_gem_request *rq; + engine->submit_request = i915_guc_submit; engine->schedule = NULL; /* Replay the current set of previously submitted requests */ - list_for_each_entry(request, - &engine->timeline->requests, link) { + list_for_each_entry(rq, &engine->timeline->requests, link) { client->wq_rsvd += sizeof(struct guc_wq_item); - if (i915_sw_fence_done(&request->submit)) - i915_guc_submit(request); + i915_guc_submit(rq); } } @@ -1441,14 +1459,18 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv) /* Revert back to manual ELSP submission */ intel_execlists_enable_submission(dev_priv); - - guc_client_free(dev_priv, guc->execbuf_client); - guc->execbuf_client = NULL; } void i915_guc_submission_fini(struct drm_i915_private *dev_priv) { struct intel_guc *guc = &dev_priv->guc; + struct i915_guc_client *client; + + client = fetch_and_zero(&guc->execbuf_client); + if (!client) + return; + + guc_client_free(dev_priv, client); i915_vma_unpin_and_release(&guc->ads_vma); i915_vma_unpin_and_release(&guc->log.vma); -- cgit v1.2.3 From 34ba5a80f249cd465d138705da138a82b4fe7e67 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 29 Nov 2016 12:10:24 +0000 Subject: drm/i915/guc: Split hw submission for replay after GPU reset Something I missed before sending off the partial series was that the non-scheduler guc reset path was broken (in the full series, this is pushed to the execlists reset handler). 
The issue is that after a reset, we have to refill the GuC workqueues, which we do by resubmitting the requests. However, if we already have submitted them, the fences within them have already been used and triggering them again is an error. Instead, just repopulate the guc workqueue. [ 115.858560] [IGT] gem_busy: starting subtest hang-render [ 135.839867] [drm] GPU HANG: ecode 9:0:0xe757fefe, in gem_busy [1716], reason: Hang on render ring, action: reset [ 135.839902] drm/i915: Resetting chip after gpu hang [ 135.839957] [drm] RC6 on [ 135.858351] ------------[ cut here ]------------ [ 135.858357] WARNING: CPU: 2 PID: 45 at drivers/gpu/drm/i915/i915_sw_fence.c:108 i915_sw_fence_complete+0x25/0x30 [ 135.858357] Modules linked in: rfcomm bnep binfmt_misc nls_iso8859_1 input_leds snd_hda_codec_hdmi snd_hda_codec_realtek snd_hda_codec_generic snd_hda_intel snd_hda_codec snd_hda_core btusb btrtl snd_hwdep snd_pcm 8250_dw snd_seq_midi hid_lenovo snd_seq_midi_event snd_rawmidi iwlwifi x86_pkg_temp_thermal coretemp snd_seq crct10dif_pclmul snd_seq_device hci_uart snd_timer crc32_pclmul ghash_clmulni_intel idma64 aesni_intel virt_dma btbcm snd btqca aes_x86_64 btintel lrw cfg80211 bluetooth gf128mul glue_helper ablk_helper cryptd soundcore intel_lpss_pci intel_pch_thermal intel_lpss_acpi intel_lpss acpi_als mfd_core kfifo_buf acpi_pad industrialio autofs4 hid_plantronics usbhid dm_mirror dm_region_hash dm_log sdhci_pci ahci sdhci libahci i2c_hid hid [ 135.858389] CPU: 2 PID: 45 Comm: kworker/2:1 Tainted: G W 4.9.0-rc4+ #238 [ 135.858389] Hardware name: /NUC6i3SYB, BIOS SYSKLi35.86A.0024.2015.1027.2142 10/27/2015 [ 135.858392] Workqueue: events_long i915_hangcheck_elapsed [ 135.858394] ffffc900001bf9b8 ffffffff812bb238 0000000000000000 0000000000000000 [ 135.858396] ffffc900001bf9f8 ffffffff8104f621 0000006c00000000 ffff8808296137f8 [ 135.858398] 0000000000000a00 ffff8808457a0000 ffff880845764e60 ffff880845760000 [ 135.858399] Call Trace: [ 135.858403] [] dump_stack+0x4d/0x65 [ 135.858405] [] __warn+0xc1/0xe0 [ 135.858406] [] warn_slowpath_null+0x18/0x20 [ 135.858408] [] i915_sw_fence_complete+0x25/0x30 [ 135.858410] [] i915_sw_fence_commit+0xd/0x30 [ 135.858412] [] __i915_gem_request_submit+0xe1/0xf0 [ 135.858413] [] i915_gem_request_submit+0x28/0x40 [ 135.858415] [] i915_guc_submit+0x47/0x210 [ 135.858417] [] i915_guc_submission_enable+0x468/0x540 [ 135.858419] [] intel_guc_setup+0x715/0x810 [ 135.858421] [] i915_gem_init_hw+0x114/0x2a0 [ 135.858423] [] i915_reset+0xe8/0x120 [ 135.858424] [] i915_reset_and_wakeup+0x157/0x180 [ 135.858426] [] i915_handle_error+0x1ab/0x230 [ 135.858428] [] ? scnprintf+0x4d/0x90 [ 135.858430] [] i915_hangcheck_elapsed+0x275/0x3d0 [ 135.858432] [] process_one_work+0x12f/0x410 [ 135.858433] [] worker_thread+0x43/0x4d0 [ 135.858435] [] ? process_one_work+0x410/0x410 [ 135.858436] [] ? process_one_work+0x410/0x410 [ 135.858438] [] kthread+0xd4/0xf0 [ 135.858440] [] ? kthread_park+0x60/0x60 v2: Only resubmit submitted requests v3: Don't forget the pending requests have reserved space. 
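The constraint driving the split is that the submit fence is one-shot: it may be signalled exactly once, so a replay path must not run the fence/bookkeeping half a second time. A self-contained toy sketch of that shape in plain C (toy_request and these helpers are illustrative names only, not driver code):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy request whose "fence" may only be signalled once. */
struct toy_request {
	bool fence_signalled;
	int payload;
};

static void signal_fence(struct toy_request *rq)
{
	assert(!rq->fence_signalled);	/* signalling twice is a bug */
	rq->fence_signalled = true;
}

/* Hardware-queue half: safe to run again after a reset. */
static void enqueue_for_hw(const struct toy_request *rq)
{
	printf("queued payload %d\n", rq->payload);
}

/* Normal submission performs both halves. */
static void submit(struct toy_request *rq)
{
	signal_fence(rq);
	enqueue_for_hw(rq);
}

/* Replay after a reset refills the queue only, leaving the
 * already-signalled fence alone. */
static void replay(struct toy_request *rq)
{
	enqueue_for_hw(rq);
}

int main(void)
{
	struct toy_request rq = { .payload = 7 };

	submit(&rq);	/* normal first submission */
	replay(&rq);	/* after a simulated reset */
	return 0;
}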
Fixes: d55ac5bf97c6 ("drm/i915: Defer transfer onto execution timeline to actual hw submission") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161129121024.22650-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_guc_submission.c | 37 ++++++++++++++++-------------- 1 file changed, 20 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index b0f858cf2881..58413803ba3c 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -489,12 +489,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) } /** - * i915_guc_submit() - Submit commands through GuC + * __i915_guc_submit() - Submit commands through GuC * @rq: request associated with the commands * - * Return: 0 on success, otherwise an errno. - * (Note: nonzero really shouldn't happen!) - * * The caller must have already called i915_guc_wq_reserve() above with * a result of 0 (success), guaranteeing that there is space in the work * queue for the new request, so enqueuing the item cannot fail. @@ -506,7 +503,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) * The only error here arises if the doorbell hardware isn't functioning * as expected, which really shouln't happen. */ -static void i915_guc_submit(struct drm_i915_gem_request *rq) +static void __i915_guc_submit(struct drm_i915_gem_request *rq) { struct drm_i915_private *dev_priv = rq->i915; struct intel_engine_cs *engine = rq->engine; @@ -515,17 +512,6 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq) struct i915_guc_client *client = guc->execbuf_client; int b_ret; - /* We keep the previous context alive until we retire the following - * request. This ensures that any the context object is still pinned - * for any residual writes the HW makes into it on the context switch - * into the next object following the breadcrumb. Otherwise, we may - * retire the context too early. - */ - rq->previous_context = engine->last_context; - engine->last_context = rq->ctx; - - i915_gem_request_submit(rq); - spin_lock(&client->wq_lock); guc_wq_item_append(client, rq); @@ -545,6 +531,23 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq) spin_unlock(&client->wq_lock); } +static void i915_guc_submit(struct drm_i915_gem_request *rq) +{ + struct intel_engine_cs *engine = rq->engine; + + /* We keep the previous context alive until we retire the following + * request. This ensures that any the context object is still pinned + * for any residual writes the HW makes into it on the context switch + * into the next object following the breadcrumb. Otherwise, we may + * retire the context too early. 
+ */ + rq->previous_context = engine->last_context; + engine->last_context = rq->ctx; + + i915_gem_request_submit(rq); + __i915_guc_submit(rq); +} + /* * Everything below here is concerned with setup & teardown, and is * therefore not part of the somewhat time-critical batch-submission @@ -1443,7 +1446,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) /* Replay the current set of previously submitted requests */ list_for_each_entry(rq, &engine->timeline->requests, link) { client->wq_rsvd += sizeof(struct guc_wq_item); - i915_guc_submit(rq); + __i915_guc_submit(rq); } } -- cgit v1.2.3 From 6a259b1f8a9e99b1ed114f8bf8b0cfccee130e54 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 29 Nov 2016 16:13:57 +0200 Subject: drm/i915: Initialize dev_priv->atomic_cdclk_freq at init time MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Looks like we're only initializing dev_priv->atomic_cdclk_freq at resume and commit times, not at init time. Let's do that as well. We're now hitting the 'WARN_ON(intel_state->cdclk == 0)' in hsw_compute_linetime_wm() on account of populating intel_state->cdclk from dev_priv->atomic_cdclk_freq. Previously we were mispopulating intel_state->cdclk with dev_priv->cdclk_freq which always had a proper value at init time and hence the WARN_ON() didn't trigger. Cc: stable@vger.kernel.org Cc: Matthew Auld Reported-by: Matthew Auld Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=98902 Fixes: e0ca7a6be38c ("drm/i915: Fix cdclk vs. dev_cdclk mess when not recomputing things") Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480428837-4207-1-git-send-email-ville.syrjala@linux.intel.com Tested-by: Matthew Auld Reviewed-by: Matthew Auld --- drivers/gpu/drm/i915/intel_display.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3b9e60aeca51..978463671462 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -16516,6 +16516,7 @@ int intel_modeset_init(struct drm_device *dev) intel_update_czclk(dev_priv); intel_update_cdclk(dev_priv); + dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; intel_shared_dpll_init(dev); -- cgit v1.2.3 From d4881a48a85ccb8b100fb7cd4f4791f35b0a3f49 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 30 Nov 2016 12:01:31 +0100 Subject: drm/i915: Remove dummy i915_kick_out_firmware_fb Since commit 44adece57e2604cec8527a499b48e4d584ab53b8 Author: Daniel Vetter Date: Wed Aug 10 18:52:34 2016 +0200 drm/fb-helper: Add a dummy remove_conflicting_framebuffers the drm helpers take care of this for us. 
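The reason the local dummy can go away is the usual kernel idiom of the shared header supplying a static inline no-op when the dependency is not configured, so callers stay #ifdef-free. A minimal compilable illustration of that idiom (CONFIG_FEATURE and remove_conflicting_users are placeholder names, not the real DRM interfaces):

#include <stdio.h>

/* What a shared header typically provides: a real implementation when the
 * feature is configured, and a static inline no-op otherwise. */
#ifdef CONFIG_FEATURE
static inline int remove_conflicting_users(const char *name)
{
	printf("kicking out users of %s\n", name);
	return 0;
}
#else
static inline int remove_conflicting_users(const char *name)
{
	(void)name;
	return 0;	/* nothing to do when the feature is absent */
}
#endif

/* The caller looks the same either way, which is what lets a
 * driver-local #if/#else wrapper be deleted. */
int main(void)
{
	return remove_conflicting_users("example-fb");
}

Built with or without -DCONFIG_FEATURE, the caller is unchanged; only the header-provided helper differs.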
Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161130110131.25668-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/i915/i915_drv.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 8dac298461c0..dc80c8ff54e3 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -643,7 +643,6 @@ out: return ret; } -#if IS_ENABLED(CONFIG_FB) static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) { struct apertures_struct *ap; @@ -668,12 +667,6 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) return ret; } -#else -static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) -{ - return 0; -} -#endif #if !defined(CONFIG_VGA_CONSOLE) static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From 80fa66b6ad2befc11c93d9b74208110eaeaa6d41 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 1 Dec 2016 11:33:16 +0200 Subject: drm/i915: Create a common GEN9_LP_FEATURE. The following LP platform inherits a lot of this platform So let's simplify here to re-use this later. v2: Keep ddb_size out of the new macro. v3: Rebase (has_decoupled_mmio). (Imre) Signed-off-by: Rodrigo Vivi Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Imre Deak Link: http://patchwork.freedesktop.org/patch/msgid/1480584796-19466-1-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/i915_pci.c | 47 ++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index fce8e198bc76..2797dec5c9f1 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -343,31 +343,34 @@ static const struct intel_device_info intel_skylake_gt3_info = { .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; +#define GEN9_LP_FEATURES \ + .gen = 9, \ + .has_hotplug = 1, \ + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ + .num_pipes = 3, \ + .has_64bit_reloc = 1, \ + .has_ddi = 1, \ + .has_fpga_dbg = 1, \ + .has_fbc = 1, \ + .has_runtime_pm = 1, \ + .has_pooled_eu = 0, \ + .has_csr = 1, \ + .has_resource_streamer = 1, \ + .has_rc6 = 1, \ + .has_dp_mst = 1, \ + .has_gmbus_irq = 1, \ + .has_hw_contexts = 1, \ + .has_logical_ring_contexts = 1, \ + .has_guc = 1, \ + .has_decoupled_mmio = 1, \ + GEN_DEFAULT_PIPEOFFSETS, \ + IVB_CURSOR_OFFSETS, \ + BDW_COLORS + static const struct intel_device_info intel_broxton_info = { .is_broxton = 1, - .gen = 9, - .has_hotplug = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, - .num_pipes = 3, - .has_64bit_reloc = 1, - .has_ddi = 1, - .has_fpga_dbg = 1, - .has_fbc = 1, - .has_runtime_pm = 1, - .has_pooled_eu = 0, - .has_csr = 1, - .has_resource_streamer = 1, - .has_rc6 = 1, - .has_dp_mst = 1, - .has_gmbus_irq = 1, - .has_hw_contexts = 1, - .has_logical_ring_contexts = 1, - .has_guc = 1, - .has_decoupled_mmio = 1, + GEN9_LP_FEATURES, .ddb_size = 512, - GEN_DEFAULT_PIPEOFFSETS, - IVB_CURSOR_OFFSETS, - BDW_COLORS, }; static const struct intel_device_info intel_kabylake_info = { -- cgit v1.2.3 From c22097fa47697f2f8fa1407ad012d3b987621e6a Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Mon, 14 Nov 2016 16:25:26 +0200 Subject: drm/i915/glk: Introduce Geminilake platform definition MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Geminilake is an Intel® Processor containing Intel® HD Graphics following Broxton. Let's start by adding the platform definition. PCI IDs and plaform specific code will follow. v2: Rebase (don't allow dev to be used with the new macro). v3: Update ddb size. (Matt) Rebase on s/preliminary_hw/alpha/ Cc: Matt Roper Cc: Rodrigo Vivi Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1479133526-32389-1-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_pci.c | 7 +++++++ 2 files changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 297ad03ab0c2..9a76b90c25a5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -703,6 +703,7 @@ struct intel_csr { func(is_broadwell); \ func(is_skylake); \ func(is_broxton); \ + func(is_geminilake); \ func(is_kabylake); \ func(is_alpha_support); \ /* Keep has_* in alphabetical order */ \ @@ -2522,6 +2523,7 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell) #define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake) #define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton) +#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.is_geminilake) #define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake) #define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 2797dec5c9f1..a3cc0e7d0ac8 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -373,6 +373,13 @@ static const struct intel_device_info intel_broxton_info = { .ddb_size = 512, }; +static const struct intel_device_info intel_geminilake_info = { + .is_alpha_support = 1, + .is_geminilake = 1, + GEN9_LP_FEATURES, + .ddb_size = 1024, +}; + static const struct intel_device_info intel_kabylake_info = { BDW_FEATURES, .is_kabylake = 1, -- cgit v1.2.3 From 8363e3c3947d0e22955f94a6a87e4f17ce5087b4 Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Thu, 10 Nov 2016 17:23:08 +0200 Subject: drm/i915/glk: Add Geminilake PCI IDs v2: Add new 0x3185 ID. 
(Joonas) Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1478791400-21756-4-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/i915_pci.c | 1 + include/drm/i915_pciids.h | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index a3cc0e7d0ac8..ab4fe712126b 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -437,6 +437,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), INTEL_BXT_IDS(&intel_broxton_info), + INTEL_GLK_IDS(&intel_geminilake_info), INTEL_KBL_GT1_IDS(&intel_kabylake_info), INTEL_KBL_GT2_IDS(&intel_kabylake_info), INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 0d5f4268d75f..540be9ff0346 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -292,6 +292,10 @@ INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \ INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */ +#define INTEL_GLK_IDS(info) \ + INTEL_VGA_DEVICE(0x3184, info), \ + INTEL_VGA_DEVICE(0x3185, info) + #define INTEL_KBL_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \ -- cgit v1.2.3 From 3e4274f86e73388025c1507c10e1a785eae44488 Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Thu, 10 Nov 2016 17:23:09 +0200 Subject: drm/i915/glk: Add a IS_GEN9_LP() macro Broxton and Geminilake are both gen9lp platforms. To avoid adding IS_GEMINILAKE() checks everywhere alongside the IS_BROXTON() ones, add a IS_GEN9_LP() macro. v2: Rename macro parameter to dev_priv. 
(Joonas) Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.h | 3 +++ drivers/gpu/drm/i915/i915_pci.c | 1 + 2 files changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9a76b90c25a5..34f2b0da6a81 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -705,6 +705,7 @@ struct intel_csr { func(is_broxton); \ func(is_geminilake); \ func(is_kabylake); \ + func(is_lp); \ func(is_alpha_support); \ /* Keep has_* in alphabetical order */ \ func(has_64bit_reloc); \ @@ -2611,6 +2612,8 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7))) #define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8))) +#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && INTEL_INFO(dev_priv)->is_lp) + #define ENGINE_MASK(id) BIT(id) #define RENDER_RING ENGINE_MASK(RCS) #define BSD_RING ENGINE_MASK(VCS) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index ab4fe712126b..389a33090707 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -345,6 +345,7 @@ static const struct intel_device_info intel_skylake_gt3_info = { #define GEN9_LP_FEATURES \ .gen = 9, \ + .is_lp = 1, \ .has_hotplug = 1, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ .num_pipes = 3, \ -- cgit v1.2.3 From 908764f6d0bd1ba496cb8da33b9b98297ed27351 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Tue, 29 Nov 2016 21:40:29 +0200 Subject: drm/i915/lspcon: Enable AUX interrupts for resume time initialization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For LSPCON initialization during system resume we need AUX functionality, but we call the corresponding encoder reset hook with all interrupts disabled. Without interrupts we'll do a poll-wait for AUX transfer completions, which adds a significant delay if the transfers timeout/need to be retried for some reason. Fix this by enabling interrupts before calling the reset hooks. Note that while this will enable AUX interrupts it will keep HPD interrupts disabled, in a similar way to the init time output setup code. This issue existed since LSPCON support was added. v2: - Rebased on drm-tip. Cc: Shashank Sharma Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Tested-by: David Weinehall Link: http://patchwork.freedesktop.org/patch/msgid/1480448429-27739-1-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index dc80c8ff54e3..89af78eb5c32 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1575,18 +1575,21 @@ static int i915_drm_resume(struct drm_device *dev) intel_opregion_setup(dev_priv); intel_init_pch_refclk(dev_priv); - drm_mode_config_reset(dev); /* * Interrupts have to be enabled before any batches are run. If not the * GPU will hang. i915_gem_init_hw() will initiate batches to * update/restore the context. * + * drm_mode_config_reset() needs AUX interrupts. + * * Modeset enabling in intel_modeset_init_hw() also needs working * interrupts. 
*/ intel_runtime_pm_enable_interrupts(dev_priv); + drm_mode_config_reset(dev); + mutex_lock(&dev->struct_mutex); if (i915_gem_init_hw(dev)) { DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); -- cgit v1.2.3 From 187685cb909881822f4eeaa5acdf80e25d7d1489 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Dec 2016 14:16:36 +0000 Subject: drm/i915: Make GEM object alloc/free and stolen created take dev_priv Where it is more appropriate and also to be consistent with the direction of the driver. v2: Leave out object alloc/free inlining. (Joonas Lahtinen) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.h | 6 +++--- drivers/gpu/drm/i915/i915_gem.c | 5 ++--- drivers/gpu/drm/i915/i915_gem_dmabuf.c | 2 +- drivers/gpu/drm/i915/i915_gem_internal.c | 2 +- drivers/gpu/drm/i915/i915_gem_stolen.c | 21 +++++++++------------ drivers/gpu/drm/i915/i915_gem_userptr.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/intel_fbdev.c | 2 +- drivers/gpu/drm/i915/intel_overlay.c | 3 +-- drivers/gpu/drm/i915/intel_pm.c | 4 ++-- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 +- 12 files changed, 24 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 34f2b0da6a81..65d7a7811236 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2981,7 +2981,7 @@ void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); int i915_gem_freeze(struct drm_i915_private *dev_priv); int i915_gem_freeze_late(struct drm_i915_private *dev_priv); -void *i915_gem_object_alloc(struct drm_device *dev); +void *i915_gem_object_alloc(struct drm_i915_private *dev_priv); void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops); @@ -3366,9 +3366,9 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, int i915_gem_init_stolen(struct drm_i915_private *dev_priv); void i915_gem_cleanup_stolen(struct drm_device *dev); struct drm_i915_gem_object * -i915_gem_object_create_stolen(struct drm_device *dev, u32 size); +i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size); struct drm_i915_gem_object * -i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, +i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv, u32 stolen_offset, u32 gtt_offset, u32 size); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8ebefb6f6cf2..ab75d27b74d5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -622,9 +622,8 @@ out: return ret; } -void *i915_gem_object_alloc(struct drm_device *dev) +void *i915_gem_object_alloc(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); } @@ -3990,7 +3989,7 @@ i915_gem_object_create(struct drm_device *dev, u64 size) if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(dev); + obj = i915_gem_object_alloc(dev_priv); if (obj == NULL) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 5e38299b5df6..d037adcda6f2 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -278,7 
+278,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, get_dma_buf(dma_buf); - obj = i915_gem_object_alloc(dev); + obj = i915_gem_object_alloc(to_i915(dev)); if (obj == NULL) { ret = -ENOMEM; goto fail_detach; diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c index 4b3ff3e5b911..08d26306d40e 100644 --- a/drivers/gpu/drm/i915/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/i915_gem_internal.c @@ -155,7 +155,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915, { struct drm_i915_gem_object *obj; - obj = i915_gem_object_alloc(&i915->drm); + obj = i915_gem_object_alloc(i915); if (!obj) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index ebaa941c83af..b3bac2557665 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -579,22 +579,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { }; static struct drm_i915_gem_object * -_i915_gem_object_create_stolen(struct drm_device *dev, +_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, struct drm_mm_node *stolen) { struct drm_i915_gem_object *obj; - obj = i915_gem_object_alloc(dev); + obj = i915_gem_object_alloc(dev_priv); if (obj == NULL) return NULL; - drm_gem_private_object_init(dev, &obj->base, stolen->size); + drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size); i915_gem_object_init(obj, &i915_gem_object_stolen_ops); obj->stolen = stolen; obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; - obj->cache_level = HAS_LLC(to_i915(dev)) ? - I915_CACHE_LLC : I915_CACHE_NONE; + obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE; if (i915_gem_object_pin_pages(obj)) goto cleanup; @@ -607,9 +606,8 @@ cleanup: } struct drm_i915_gem_object * -i915_gem_object_create_stolen(struct drm_device *dev, u32 size) +i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; int ret; @@ -630,7 +628,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size) return NULL; } - obj = _i915_gem_object_create_stolen(dev, stolen); + obj = _i915_gem_object_create_stolen(dev_priv, stolen); if (obj) return obj; @@ -640,12 +638,11 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size) } struct drm_i915_gem_object * -i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, +i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv, u32 stolen_offset, u32 gtt_offset, u32 size) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; @@ -655,7 +652,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, if (!drm_mm_initialized(&dev_priv->mm.stolen)) return NULL; - lockdep_assert_held(&dev->struct_mutex); + lockdep_assert_held(&dev_priv->drm.struct_mutex); DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n", stolen_offset, gtt_offset, size); @@ -680,7 +677,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, return NULL; } - obj = _i915_gem_object_create_stolen(dev, stolen); + obj = _i915_gem_object_create_stolen(dev_priv, stolen); if (obj == NULL) { DRM_DEBUG_KMS("failed to allocate stolen object\n"); 
i915_gem_stolen_remove_node(dev_priv, stolen); diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 107ddf51065e..48963a20992f 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -784,7 +784,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file return -ENODEV; } - obj = i915_gem_object_alloc(dev); + obj = i915_gem_object_alloc(dev_priv); if (obj == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 978463671462..aeaf701f0ff5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2689,7 +2689,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, mutex_lock(&dev->struct_mutex); - obj = i915_gem_object_create_stolen_for_preallocated(dev, + obj = i915_gem_object_create_stolen_for_preallocated(dev_priv, base_aligned, base_aligned, size_aligned); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 3da4d466e332..348fae620811 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -257,7 +257,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size) WARN_ON(engine->scratch); - obj = i915_gem_object_create_stolen(&engine->i915->drm, size); + obj = i915_gem_object_create_stolen(engine->i915, size); if (!obj) obj = i915_gem_object_create_internal(engine->i915, size); if (IS_ERR(obj)) { diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fc958d5ed0dc..d7e50db20bb8 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -145,7 +145,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, * important and we should probably use that space with FBC or other * features. */ if (size * 2 < ggtt->stolen_usable_size) - obj = i915_gem_object_create_stolen(dev, size); + obj = i915_gem_object_create_stolen(dev_priv, size); if (obj == NULL) obj = i915_gem_object_create(dev, size); if (IS_ERR(obj)) { diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index fd0e4dac7cc1..0a7b83aaa2b4 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1391,8 +1391,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) reg_bo = NULL; if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) - reg_bo = i915_gem_object_create_stolen(&dev_priv->drm, - PAGE_SIZE); + reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE); if (reg_bo == NULL) reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE); if (IS_ERR(reg_bo)) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index bfd2c289ce49..70108aa2309c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5805,7 +5805,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) int pcbr_offset; pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; - pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm, + pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv, pcbr_offset, I915_GTT_OFFSET_NONE, pctx_size); @@ -5822,7 +5822,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) * overlap with other ranges, such as the frame buffer, protected * memory, or any other relevant ranges. 
*/ - pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size); + pctx = i915_gem_object_create_stolen(dev_priv, pctx_size); if (!pctx) { DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); goto out; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index aeb637dc1fdf..e193e52f669d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1869,7 +1869,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) struct drm_i915_gem_object *obj; struct i915_vma *vma; - obj = i915_gem_object_create_stolen(&dev_priv->drm, size); + obj = i915_gem_object_create_stolen(dev_priv, size); if (!obj) obj = i915_gem_object_create(&dev_priv->drm, size); if (IS_ERR(obj)) -- cgit v1.2.3 From 12d79d78287cdc5323b4a589a2ca2ec16c5063fc Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Dec 2016 14:16:37 +0000 Subject: drm/i915: Make GEM object create and create from data take dev_priv Makes all GEM object constructors consistent. v2: Fix compilation in GVT code. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen (v1) --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 5 ++--- drivers/gpu/drm/i915/i915_drv.h | 9 +++++---- drivers/gpu/drm/i915/i915_gem.c | 20 ++++++++++---------- drivers/gpu/drm/i915/i915_gem_context.c | 5 +++-- drivers/gpu/drm/i915/i915_guc_submission.c | 2 +- drivers/gpu/drm/i915/i915_perf.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_fbdev.c | 2 +- drivers/gpu/drm/i915/intel_guc_loader.c | 5 +++-- drivers/gpu/drm/i915/intel_lrc.c | 4 ++-- drivers/gpu/drm/i915/intel_overlay.c | 2 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 4 ++-- 12 files changed, 32 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index d26a092c70e8..9a4b23c3ee97 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1602,7 +1602,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) return -ENOMEM; entry_obj->obj = - i915_gem_object_create(&(s->vgpu->gvt->dev_priv->drm), + i915_gem_object_create(s->vgpu->gvt->dev_priv, roundup(bb_size, PAGE_SIZE)); if (IS_ERR(entry_obj->obj)) { ret = PTR_ERR(entry_obj->obj); @@ -2665,14 +2665,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) { - struct drm_device *dev = &wa_ctx->workload->vgpu->gvt->dev_priv->drm; int ctx_size = wa_ctx->indirect_ctx.size; unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; struct drm_i915_gem_object *obj; int ret = 0; void *map; - obj = i915_gem_object_create(dev, + obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv, roundup(ctx_size + CACHELINE_BYTES, PAGE_SIZE)); if (IS_ERR(obj)) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 65d7a7811236..8b725d13d24e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2985,10 +2985,11 @@ void *i915_gem_object_alloc(struct drm_i915_private *dev_priv); void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops); -struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, - u64 size); -struct drm_i915_gem_object *i915_gem_object_create_from_data( - struct drm_device *dev, const void *data, 
size_t size); +struct drm_i915_gem_object * +i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size); +struct drm_i915_gem_object * +i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, + const void *data, size_t size); void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); void i915_gem_free_object(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ab75d27b74d5..10c3b505f49a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -635,7 +635,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj) static int i915_gem_create(struct drm_file *file, - struct drm_device *dev, + struct drm_i915_private *dev_priv, uint64_t size, uint32_t *handle_p) { @@ -648,7 +648,7 @@ i915_gem_create(struct drm_file *file, return -EINVAL; /* Allocate the new object */ - obj = i915_gem_object_create(dev, size); + obj = i915_gem_object_create(dev_priv, size); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -670,7 +670,7 @@ i915_gem_dumb_create(struct drm_file *file, /* have to work out size/pitch and return them */ args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); args->size = args->pitch * args->height; - return i915_gem_create(file, dev, + return i915_gem_create(file, to_i915(dev), args->size, &args->handle); } @@ -684,11 +684,12 @@ int i915_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_create *args = data; - i915_gem_flush_free_objects(to_i915(dev)); + i915_gem_flush_free_objects(dev_priv); - return i915_gem_create(file, dev, + return i915_gem_create(file, dev_priv, args->size, &args->handle); } @@ -3970,9 +3971,8 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = { (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE)) struct drm_i915_gem_object * -i915_gem_object_create(struct drm_device *dev, u64 size) +i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; struct address_space *mapping; gfp_t mask; @@ -3993,7 +3993,7 @@ i915_gem_object_create(struct drm_device *dev, u64 size) if (obj == NULL) return ERR_PTR(-ENOMEM); - ret = drm_gem_object_init(dev, &obj->base, size); + ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size); if (ret) goto fail; @@ -4749,7 +4749,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, /* Allocate a new GEM object and fill it with the supplied data */ struct drm_i915_gem_object * -i915_gem_object_create_from_data(struct drm_device *dev, +i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, const void *data, size_t size) { struct drm_i915_gem_object *obj; @@ -4757,7 +4757,7 @@ i915_gem_object_create_from_data(struct drm_device *dev, size_t bytes; int ret; - obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE)); + obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE)); if (IS_ERR(obj)) return obj; diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index a6add0c14045..5241b51dd986 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -169,12 +169,13 @@ void i915_gem_context_free(struct kref *ctx_ref) static struct drm_i915_gem_object * alloc_context_obj(struct drm_device *dev, u64 size) { + struct drm_i915_private *dev_priv = to_i915(dev); struct 
drm_i915_gem_object *obj; int ret; lockdep_assert_held(&dev->struct_mutex); - obj = i915_gem_object_create(dev, size); + obj = i915_gem_object_create(dev_priv, size); if (IS_ERR(obj)) return obj; @@ -193,7 +194,7 @@ alloc_context_obj(struct drm_device *dev, u64 size) * This is only applicable for Ivy Bridge devices since * later platforms don't have L3 control bits in the PTE. */ - if (IS_IVYBRIDGE(to_i915(dev))) { + if (IS_IVYBRIDGE(dev_priv)) { ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); /* Failure shouldn't ever happen this early */ if (WARN_ON(ret)) { diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 58413803ba3c..1003b443112c 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -574,7 +574,7 @@ static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size) struct i915_vma *vma; int ret; - obj = i915_gem_object_create(&dev_priv->drm, size); + obj = i915_gem_object_create(dev_priv, size); if (IS_ERR(obj)) return ERR_CAST(obj); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 14de9a4eee27..5669f0862458 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -773,7 +773,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv) BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M); - bo = i915_gem_object_create(&dev_priv->drm, OA_BUFFER_SIZE); + bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE); if (IS_ERR(bo)) { DRM_ERROR("Failed to allocate OA buffer\n"); ret = PTR_ERR(bo); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index aeaf701f0ff5..b91e0abf66ba 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -10998,7 +10998,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, struct drm_i915_gem_object *obj; struct drm_mode_fb_cmd2 mode_cmd = { 0 }; - obj = i915_gem_object_create(dev, + obj = i915_gem_object_create(to_i915(dev), intel_framebuffer_size_for_mode(mode, bpp)); if (IS_ERR(obj)) return ERR_CAST(obj); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index d7e50db20bb8..3c445b92e988 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -147,7 +147,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, if (size * 2 < ggtt->stolen_usable_size) obj = i915_gem_object_create_stolen(dev_priv, size); if (obj == NULL) - obj = i915_gem_object_create(dev, size); + obj = i915_gem_object_create(dev_priv, size); if (IS_ERR(obj)) { DRM_ERROR("failed to allocate framebuffer\n"); ret = PTR_ERR(obj); diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index a330fa499384..9926747d160f 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -590,6 +590,7 @@ fail: static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) { + struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev->pdev; struct drm_i915_gem_object *obj; const struct firmware *fw = NULL; @@ -648,7 +649,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) /* Header and uCode will be loaded to WOPCM. Size of the two. 
*/ size = guc_fw->header_size + guc_fw->ucode_size; - if (size > guc_wopcm_size(to_i915(dev))) { + if (size > guc_wopcm_size(dev_priv)) { DRM_NOTE("Firmware is too large to fit in WOPCM\n"); goto fail; } @@ -676,7 +677,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); mutex_lock(&dev->struct_mutex); - obj = i915_gem_object_create_from_data(dev, fw->data, fw->size); + obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size); mutex_unlock(&dev->struct_mutex); if (IS_ERR_OR_NULL(obj)) { err = obj ? PTR_ERR(obj) : -ENOMEM; diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index b2c0d509e191..67aec8f33c1d 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1243,7 +1243,7 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size) struct i915_vma *vma; int err; - obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size)); + obj = i915_gem_object_create(engine->i915, PAGE_ALIGN(size)); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -2242,7 +2242,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, /* One extra page as the sharing data between driver and GuC */ context_size += PAGE_SIZE * LRC_PPHWSP_PN; - ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size); + ctx_obj = i915_gem_object_create(ctx->i915, context_size); if (IS_ERR(ctx_obj)) { DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); return PTR_ERR(ctx_obj); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 0a7b83aaa2b4..90da6a707de7 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1393,7 +1393,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE); if (reg_bo == NULL) - reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE); + reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE); if (IS_ERR(reg_bo)) goto out_free; overlay->reg_bo = reg_bo; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e193e52f669d..bc18a4f2643d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1871,7 +1871,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) obj = i915_gem_object_create_stolen(dev_priv, size); if (!obj) - obj = i915_gem_object_create(&dev_priv->drm, size); + obj = i915_gem_object_create(dev_priv, size); if (IS_ERR(obj)) return ERR_CAST(obj); @@ -2452,7 +2452,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) { struct i915_vma *vma; - obj = i915_gem_object_create(&dev_priv->drm, 4096); + obj = i915_gem_object_create(dev_priv, 4096); if (IS_ERR(obj)) goto err; -- cgit v1.2.3 From bf9e8429ab9747f584e692bad52a7a9f1787a4da Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Dec 2016 14:16:38 +0000 Subject: drm/i915: Make various init functions take dev_priv Like GEM init, GUC init, MOCS init and context creation. Enables them to lose dev_priv locals. 
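Editor's note: the mechanical pattern behind this and the following conversions can be condensed into a tiny standalone sketch. Everything below is a simplified stand-in (the struct layouts, the to_i915() helper, and the example_init_* functions are made up for illustration, not the driver's real definitions); it only shows why taking dev_priv directly lets each function drop its local to_i915() boilerplate.

/*
 * Editor's sketch, not driver code: simplified stand-ins showing the
 * dev -> dev_priv parameter conversion applied throughout this series.
 */
#include <stdio.h>

struct drm_device { int irq_enabled; };

struct drm_i915_private {
	struct drm_device drm;	/* embedded DRM core device, as in i915 */
	int guc_loaded;
};

/* simplified stand-in for the kernel's container_of()-based to_i915() */
static struct drm_i915_private *to_i915(struct drm_device *dev)
{
	return (struct drm_i915_private *)dev; /* ok here: drm is the first member */
}

/* before: the function took dev and immediately derived dev_priv */
static void example_init_old(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->guc_loaded = 1;
}

/* after: the function takes dev_priv and the local variable disappears */
static void example_init_new(struct drm_i915_private *dev_priv)
{
	dev_priv->guc_loaded = 1;
}

int main(void)
{
	struct drm_i915_private i915 = { .drm = { .irq_enabled = 0 } };

	example_init_old(&i915.drm);	/* old call sites passed &dev_priv->drm */
	example_init_new(&i915);	/* new call sites pass dev_priv itself */
	printf("guc_loaded=%d\n", i915.guc_loaded);
	return 0;
}
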
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.c | 31 ++++++++++++++-------------- drivers/gpu/drm/i915/i915_drv.h | 10 ++++----- drivers/gpu/drm/i915/i915_gem.c | 28 ++++++++++++------------- drivers/gpu/drm/i915/i915_gem_context.c | 31 +++++++++++++--------------- drivers/gpu/drm/i915/i915_guc_submission.c | 10 ++++----- drivers/gpu/drm/i915/intel_engine_cs.c | 5 ++--- drivers/gpu/drm/i915/intel_guc_loader.c | 33 ++++++++++++++---------------- drivers/gpu/drm/i915/intel_lrc.h | 2 +- drivers/gpu/drm/i915/intel_mocs.c | 5 ++--- drivers/gpu/drm/i915/intel_mocs.h | 2 +- drivers/gpu/drm/i915/intel_uc.h | 10 ++++----- 11 files changed, 77 insertions(+), 90 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 89af78eb5c32..a7f85bdf8d66 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -603,9 +603,9 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_irq; - intel_guc_init(dev); + intel_guc_init(dev_priv); - ret = i915_gem_init(dev); + ret = i915_gem_init(dev_priv); if (ret) goto cleanup_irq; @@ -626,11 +626,11 @@ static int i915_load_modeset_init(struct drm_device *dev) return 0; cleanup_gem: - if (i915_gem_suspend(dev)) + if (i915_gem_suspend(dev_priv)) DRM_ERROR("failed to idle hardware; continuing to unload!\n"); i915_gem_fini(dev_priv); cleanup_irq: - intel_guc_fini(dev); + intel_guc_fini(dev_priv); drm_irq_uninstall(dev); intel_teardown_gmbus(dev); cleanup_csr: @@ -1283,7 +1283,7 @@ void i915_driver_unload(struct drm_device *dev) intel_fbdev_fini(dev); - if (i915_gem_suspend(dev)) + if (i915_gem_suspend(dev_priv)) DRM_ERROR("failed to idle hardware; continuing to unload!\n"); intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); @@ -1320,7 +1320,7 @@ void i915_driver_unload(struct drm_device *dev) /* Flush any outstanding unpin_work. 
*/ drain_workqueue(dev_priv->wq); - intel_guc_fini(dev); + intel_guc_fini(dev_priv); i915_gem_fini(dev_priv); intel_fbc_cleanup_cfb(dev_priv); @@ -1425,14 +1425,14 @@ static int i915_drm_suspend(struct drm_device *dev) pci_save_state(pdev); - error = i915_gem_suspend(dev); + error = i915_gem_suspend(dev_priv); if (error) { dev_err(&pdev->dev, "GEM idle failed, resume might fail\n"); goto out; } - intel_guc_suspend(dev); + intel_guc_suspend(dev_priv); intel_display_suspend(dev); @@ -1568,7 +1568,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_csr_ucode_resume(dev_priv); - i915_gem_resume(dev); + i915_gem_resume(dev_priv); i915_restore_state(dev); intel_pps_unlock_regs_wa(dev_priv); @@ -1591,13 +1591,13 @@ static int i915_drm_resume(struct drm_device *dev) drm_mode_config_reset(dev); mutex_lock(&dev->struct_mutex); - if (i915_gem_init_hw(dev)) { + if (i915_gem_init_hw(dev_priv)) { DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); i915_gem_set_wedged(dev_priv); } mutex_unlock(&dev->struct_mutex); - intel_guc_resume(dev); + intel_guc_resume(dev_priv); intel_modeset_init_hw(dev); @@ -1770,11 +1770,10 @@ static void enable_engines_irq(struct drm_i915_private *dev_priv) */ void i915_reset(struct drm_i915_private *dev_priv) { - struct drm_device *dev = &dev_priv->drm; struct i915_gpu_error *error = &dev_priv->gpu_error; int ret; - lockdep_assert_held(&dev->struct_mutex); + lockdep_assert_held(&dev_priv->drm.struct_mutex); if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags)) return; @@ -1814,7 +1813,7 @@ void i915_reset(struct drm_i915_private *dev_priv) * was running at the time of the reset (i.e. we weren't VT * switched away). */ - ret = i915_gem_init_hw(dev); + ret = i915_gem_init_hw(dev_priv); if (ret) { DRM_ERROR("Failed hw init on reset %d\n", ret); goto error; @@ -2328,7 +2327,7 @@ static int intel_runtime_suspend(struct device *kdev) */ i915_gem_runtime_suspend(dev_priv); - intel_guc_suspend(dev); + intel_guc_suspend(dev_priv); intel_runtime_pm_disable_interrupts(dev_priv); @@ -2413,7 +2412,7 @@ static int intel_runtime_resume(struct device *kdev) if (intel_uncore_unclaimed_mmio(dev_priv)) DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); - intel_guc_resume(dev); + intel_guc_resume(dev_priv); if (IS_GEN6(dev_priv)) intel_init_pch_refclk(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8b725d13d24e..bbc6d0f2d8bf 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3177,14 +3177,14 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) void i915_gem_reset(struct drm_i915_private *dev_priv); void i915_gem_set_wedged(struct drm_i915_private *dev_priv); void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); -int __must_check i915_gem_init(struct drm_device *dev); -int __must_check i915_gem_init_hw(struct drm_device *dev); +int __must_check i915_gem_init(struct drm_i915_private *dev_priv); +int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); void i915_gem_cleanup_engines(struct drm_device *dev); int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, unsigned int flags); -int __must_check i915_gem_suspend(struct drm_device *dev); -void i915_gem_resume(struct drm_device *dev); +int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); +void i915_gem_resume(struct drm_i915_private *dev_priv); int i915_gem_fault(struct 
vm_area_struct *vma, struct vm_fault *vmf); int i915_gem_object_wait(struct drm_i915_gem_object *obj, unsigned int flags, @@ -3267,7 +3267,7 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, struct sg_table *pages); /* i915_gem_context.c */ -int __must_check i915_gem_context_init(struct drm_device *dev); +int __must_check i915_gem_context_init(struct drm_i915_private *dev_priv); void i915_gem_context_lost(struct drm_i915_private *dev_priv); void i915_gem_context_fini(struct drm_device *dev); int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 10c3b505f49a..b2a2e5843dc8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4194,9 +4194,9 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv) GEM_BUG_ON(engine->last_context != dev_priv->kernel_context); } -int i915_gem_suspend(struct drm_device *dev) +int i915_gem_suspend(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_device *dev = &dev_priv->drm; int ret; intel_suspend_gt_powersave(dev_priv); @@ -4270,9 +4270,9 @@ err: return ret; } -void i915_gem_resume(struct drm_device *dev) +void i915_gem_resume(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_device *dev = &dev_priv->drm; WARN_ON(dev_priv->gt.awake); @@ -4337,9 +4337,8 @@ static void init_unused_rings(struct drm_i915_private *dev_priv) } int -i915_gem_init_hw(struct drm_device *dev) +i915_gem_init_hw(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; enum intel_engine_id id; int ret; @@ -4393,10 +4392,10 @@ i915_gem_init_hw(struct drm_device *dev) goto out; } - intel_mocs_init_l3cc_table(dev); + intel_mocs_init_l3cc_table(dev_priv); /* We can't enable contexts until all firmware is loaded */ - ret = intel_guc_setup(dev); + ret = intel_guc_setup(dev_priv); if (ret) goto out; @@ -4426,12 +4425,11 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) return true; } -int i915_gem_init(struct drm_device *dev) +int i915_gem_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); int ret; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); if (!i915.enable_execlists) { dev_priv->gt.resume = intel_legacy_submission_resume; @@ -4455,15 +4453,15 @@ int i915_gem_init(struct drm_device *dev) if (ret) goto out_unlock; - ret = i915_gem_context_init(dev); + ret = i915_gem_context_init(dev_priv); if (ret) goto out_unlock; - ret = intel_engines_init(dev); + ret = intel_engines_init(dev_priv); if (ret) goto out_unlock; - ret = i915_gem_init_hw(dev); + ret = i915_gem_init_hw(dev_priv); if (ret == -EIO) { /* Allow engine initialisation to fail by marking the GPU as * wedged. 
But we only want to do this where the GPU is angry, @@ -4476,7 +4474,7 @@ int i915_gem_init(struct drm_device *dev) out_unlock: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 5241b51dd986..ea847e865137 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -167,13 +167,12 @@ void i915_gem_context_free(struct kref *ctx_ref) } static struct drm_i915_gem_object * -alloc_context_obj(struct drm_device *dev, u64 size) +alloc_context_obj(struct drm_i915_private *dev_priv, u64 size) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; int ret; - lockdep_assert_held(&dev->struct_mutex); + lockdep_assert_held(&dev_priv->drm.struct_mutex); obj = i915_gem_object_create(dev_priv, size); if (IS_ERR(obj)) @@ -260,10 +259,9 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) } static struct i915_gem_context * -__create_hw_context(struct drm_device *dev, +__create_hw_context(struct drm_i915_private *dev_priv, struct drm_i915_file_private *file_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_gem_context *ctx; int ret; @@ -287,7 +285,7 @@ __create_hw_context(struct drm_device *dev, struct drm_i915_gem_object *obj; struct i915_vma *vma; - obj = alloc_context_obj(dev, dev_priv->hw_context_size); + obj = alloc_context_obj(dev_priv, dev_priv->hw_context_size); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto err_out; @@ -353,21 +351,21 @@ err_out: * well as an idle case. */ static struct i915_gem_context * -i915_gem_create_context(struct drm_device *dev, +i915_gem_create_context(struct drm_i915_private *dev_priv, struct drm_i915_file_private *file_priv) { struct i915_gem_context *ctx; - lockdep_assert_held(&dev->struct_mutex); + lockdep_assert_held(&dev_priv->drm.struct_mutex); - ctx = __create_hw_context(dev, file_priv); + ctx = __create_hw_context(dev_priv, file_priv); if (IS_ERR(ctx)) return ctx; - if (USES_FULL_PPGTT(dev)) { + if (USES_FULL_PPGTT(dev_priv)) { struct i915_hw_ppgtt *ppgtt; - ppgtt = i915_ppgtt_create(to_i915(dev), file_priv, ctx->name); + ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name); if (IS_ERR(ppgtt)) { DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); @@ -407,7 +405,7 @@ i915_gem_context_create_gvt(struct drm_device *dev) if (ret) return ERR_PTR(ret); - ctx = i915_gem_create_context(dev, NULL); + ctx = i915_gem_create_context(to_i915(dev), NULL); if (IS_ERR(ctx)) goto out; @@ -433,9 +431,8 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx, } } -int i915_gem_context_init(struct drm_device *dev) +int i915_gem_context_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_gem_context *ctx; /* Init should only be called once per module load. 
Eventually the @@ -469,7 +466,7 @@ int i915_gem_context_init(struct drm_device *dev) } } - ctx = i915_gem_create_context(dev, NULL); + ctx = i915_gem_create_context(dev_priv, NULL); if (IS_ERR(ctx)) { DRM_ERROR("Failed to create default global context (error %ld)\n", PTR_ERR(ctx)); @@ -551,7 +548,7 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) idr_init(&file_priv->context_idr); mutex_lock(&dev->struct_mutex); - ctx = i915_gem_create_context(dev, file_priv); + ctx = i915_gem_create_context(to_i915(dev), file_priv); mutex_unlock(&dev->struct_mutex); if (IS_ERR(ctx)) { @@ -1034,7 +1031,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - ctx = i915_gem_create_context(dev, file_priv); + ctx = i915_gem_create_context(to_i915(dev), file_priv); mutex_unlock(&dev->struct_mutex); if (IS_ERR(ctx)) return PTR_ERR(ctx); diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 1003b443112c..7fa4e74c1dd3 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -1485,11 +1485,10 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv) /** * intel_guc_suspend() - notify GuC entering suspend state - * @dev: drm device + * @dev_priv: i915 device private */ -int intel_guc_suspend(struct drm_device *dev) +int intel_guc_suspend(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc *guc = &dev_priv->guc; struct i915_gem_context *ctx; u32 data[3]; @@ -1513,11 +1512,10 @@ int intel_guc_suspend(struct drm_device *dev) /** * intel_guc_resume() - notify GuC resuming from suspend state - * @dev: drm device + * @dev_priv: i915 device private */ -int intel_guc_resume(struct drm_device *dev) +int intel_guc_resume(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc *guc = &dev_priv->guc; struct i915_gem_context *ctx; u32 data[3]; diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 348fae620811..e8afe1185831 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -111,13 +111,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv, /** * intel_engines_init() - allocate, populate and init the Engine Command Streamers - * @dev: DRM device. + * @dev_priv: i915 device private * * Return: non-zero if the initialization failed. */ -int intel_engines_init(struct drm_device *dev) +int intel_engines_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_device_info *device_info = mkwrite_device_info(dev_priv); unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask; unsigned int mask = 0; diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 9926747d160f..260ffe5e693b 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -437,7 +437,7 @@ static int guc_hw_reset(struct drm_i915_private *dev_priv) /** * intel_guc_setup() - finish preparing the GuC for activity - * @dev: drm device + * @dev_priv: i915 device private * * Called from gem_init_hw() during driver loading and also after a GPU reset. 
* @@ -448,9 +448,8 @@ static int guc_hw_reset(struct drm_i915_private *dev_priv) * * Return: non-zero code on error */ -int intel_guc_setup(struct drm_device *dev) +int intel_guc_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; const char *fw_path = guc_fw->guc_fw_path; int retries, ret, err; @@ -588,10 +587,10 @@ fail: return ret; } -static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) +static void guc_fw_fetch(struct drm_i915_private *dev_priv, + struct intel_guc_fw *guc_fw) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct pci_dev *pdev = dev->pdev; + struct pci_dev *pdev = dev_priv->drm.pdev; struct drm_i915_gem_object *obj; const struct firmware *fw = NULL; struct guc_css_header *css; @@ -676,9 +675,9 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found, guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); if (IS_ERR_OR_NULL(obj)) { err = obj ? PTR_ERR(obj) : -ENOMEM; goto fail; @@ -700,12 +699,12 @@ fail: DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n", err, fw, guc_fw->guc_fw_obj); - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); obj = guc_fw->guc_fw_obj; if (obj) i915_gem_object_put(obj); guc_fw->guc_fw_obj = NULL; - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); release_firmware(fw); /* OK even if fw is NULL */ guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL; @@ -713,16 +712,15 @@ fail: /** * intel_guc_init() - define parameters and fetch firmware - * @dev: drm device + * @dev_priv: i915 device private * * Called early during driver load, but after GEM is initialised. * * The firmware will be transferred to the GuC's memory later, * when intel_guc_setup() is called. 
*/ -void intel_guc_init(struct drm_device *dev) +void intel_guc_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; const char *fw_path; @@ -769,7 +767,7 @@ void intel_guc_init(struct drm_device *dev) guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING; DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path); - guc_fw_fetch(dev, guc_fw); + guc_fw_fetch(dev_priv, guc_fw); /* status must now be FAIL or SUCCESS */ } @@ -777,12 +775,11 @@ void intel_guc_init(struct drm_device *dev) * intel_guc_fini() - clean up all allocated resources * @dev: drm device */ -void intel_guc_fini(struct drm_device *dev) +void intel_guc_fini(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); guc_interrupts_release(dev_priv); i915_guc_submission_disable(dev_priv); i915_guc_submission_fini(dev_priv); @@ -790,7 +787,7 @@ void intel_guc_fini(struct drm_device *dev) if (guc_fw->guc_fw_obj) i915_gem_object_put(guc_fw->guc_fw_obj); guc_fw->guc_fw_obj = NULL; - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; } diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index c1f546180ba2..7c6403243394 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -70,7 +70,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine); int logical_render_ring_init(struct intel_engine_cs *engine); int logical_xcs_ring_init(struct intel_engine_cs *engine); -int intel_engines_init(struct drm_device *dev); +int intel_engines_init(struct drm_i915_private *dev_priv); /* Logical Ring Contexts */ diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 80bb9247ce66..4f8829c0845e 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -380,7 +380,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, /** * intel_mocs_init_l3cc_table() - program the mocs control table - * @dev: The the device to be programmed. + * @dev_priv: i915 device private * * This function simply programs the mocs registers for the given table * starting at the given address. This register set is programmed in pairs. @@ -392,9 +392,8 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, * * Return: Nothing. 
*/ -void intel_mocs_init_l3cc_table(struct drm_device *dev) +void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_mocs_table table; unsigned int i; diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h index a8bd9f7bfece..ce4a5dfa5f94 100644 --- a/drivers/gpu/drm/i915/intel_mocs.h +++ b/drivers/gpu/drm/i915/intel_mocs.h @@ -53,7 +53,7 @@ #include "i915_drv.h" int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req); -void intel_mocs_init_l3cc_table(struct drm_device *dev); +void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv); int intel_mocs_init_engine(struct intel_engine_cs *engine); #endif diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index 8507a8f35b72..11f56082b363 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -178,12 +178,12 @@ int intel_guc_log_flush(struct intel_guc *guc); int intel_guc_log_control(struct intel_guc *guc, u32 control_val); /* intel_guc_loader.c */ -extern void intel_guc_init(struct drm_device *dev); -extern int intel_guc_setup(struct drm_device *dev); -extern void intel_guc_fini(struct drm_device *dev); +extern void intel_guc_init(struct drm_i915_private *dev_priv); +extern int intel_guc_setup(struct drm_i915_private *dev_priv); +extern void intel_guc_fini(struct drm_i915_private *dev_priv); extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status); -extern int intel_guc_suspend(struct drm_device *dev); -extern int intel_guc_resume(struct drm_device *dev); +extern int intel_guc_suspend(struct drm_i915_private *dev_priv); +extern int intel_guc_resume(struct drm_i915_private *dev_priv); /* i915_guc_submission.c */ int i915_guc_submission_init(struct drm_i915_private *dev_priv); -- cgit v1.2.3 From cb15d9f8c30c868f6117d96b6fa58129785026f6 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Dec 2016 14:16:39 +0000 Subject: drm/i915: More GEM init dev_priv cleanup Simplifies the code to pass the right parameter in. v2: Commit message. 
(Joonas Lahtinen) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.c | 8 ++++---- drivers/gpu/drm/i915/i915_drv.h | 8 ++++---- drivers/gpu/drm/i915/i915_gem.c | 10 +++------- drivers/gpu/drm/i915/i915_gem_context.c | 5 ++--- 4 files changed, 13 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index a7f85bdf8d66..383bf1ac1bdd 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -544,8 +544,8 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { static void i915_gem_fini(struct drm_i915_private *dev_priv) { mutex_lock(&dev_priv->drm.struct_mutex); - i915_gem_cleanup_engines(&dev_priv->drm); - i915_gem_context_fini(&dev_priv->drm); + i915_gem_cleanup_engines(dev_priv); + i915_gem_context_fini(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); rcu_barrier(); @@ -833,7 +833,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, intel_init_display_hooks(dev_priv); intel_init_clock_gating_hooks(dev_priv); intel_init_audio_hooks(dev_priv); - ret = i915_gem_load_init(&dev_priv->drm); + ret = i915_gem_load_init(dev_priv); if (ret < 0) goto err_gvt; @@ -861,7 +861,7 @@ err_workqueues: static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) { i915_perf_fini(dev_priv); - i915_gem_load_cleanup(&dev_priv->drm); + i915_gem_load_cleanup(dev_priv); i915_workqueues_cleanup(dev_priv); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bbc6d0f2d8bf..20bc0aef656a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2975,8 +2975,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int i915_gem_load_init(struct drm_device *dev); -void i915_gem_load_cleanup(struct drm_device *dev); +int i915_gem_load_init(struct drm_i915_private *dev_priv); +void i915_gem_load_cleanup(struct drm_i915_private *dev_priv); void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); int i915_gem_freeze(struct drm_i915_private *dev_priv); int i915_gem_freeze_late(struct drm_i915_private *dev_priv); @@ -3180,7 +3180,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); -void i915_gem_cleanup_engines(struct drm_device *dev); +void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, unsigned int flags); int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); @@ -3269,7 +3269,7 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, /* i915_gem_context.c */ int __must_check i915_gem_context_init(struct drm_i915_private *dev_priv); void i915_gem_context_lost(struct drm_i915_private *dev_priv); -void i915_gem_context_fini(struct drm_device *dev); +void i915_gem_context_fini(struct drm_i915_private *dev_priv); int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); int i915_switch_context(struct drm_i915_gem_request *req); diff --git 
a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b2a2e5843dc8..59065ae0b153 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4480,9 +4480,8 @@ out_unlock: } void -i915_gem_cleanup_engines(struct drm_device *dev) +i915_gem_cleanup_engines(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; enum intel_engine_id id; @@ -4522,9 +4521,8 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) } int -i915_gem_load_init(struct drm_device *dev) +i915_gem_load_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); int err = -ENOMEM; dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); @@ -4593,10 +4591,8 @@ err_out: return err; } -void i915_gem_load_cleanup(struct drm_device *dev) +void i915_gem_load_cleanup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - WARN_ON(!llist_empty(&dev_priv->mm.free_list)); mutex_lock(&dev_priv->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index ea847e865137..a57c22659a3c 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -519,12 +519,11 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv) } } -void i915_gem_context_fini(struct drm_device *dev) +void i915_gem_context_fini(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_gem_context *dctx = dev_priv->kernel_context; - lockdep_assert_held(&dev->struct_mutex); + lockdep_assert_held(&dev_priv->drm.struct_mutex); context_close(dctx); dev_priv->kernel_context = NULL; -- cgit v1.2.3 From da5f53bf3c3ad5ffcb88b2892a9c02ece2ad140f Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Dec 2016 14:16:40 +0000 Subject: drm/i915: dev_priv cleanup in bridge/bar/mmio init code dev_priv is more appropriate for these so converting saves some lines of source. v2: Commit message and keep the pdev local variable. 
(Joonas Lahtinen) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.c | 45 ++++++++++++++++------------------------- 1 file changed, 17 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 383bf1ac1bdd..1312cd5e4465 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -142,9 +142,8 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv) return ret; } -static void intel_detect_pch(struct drm_device *dev) +static void intel_detect_pch(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pch = NULL; /* In all current cases, num_pipes is equivalent to the PCH_NOP setting @@ -361,10 +360,8 @@ static int i915_getparam(struct drm_device *dev, void *data, return 0; } -static int i915_get_bridge_dev(struct drm_device *dev) +static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); if (!dev_priv->bridge_dev) { DRM_ERROR("bridge device not found\n"); @@ -375,9 +372,8 @@ static int i915_get_bridge_dev(struct drm_device *dev) /* Allocate space for the MCH regs if needed, return nonzero on error */ static int -intel_alloc_mchbar_resource(struct drm_device *dev) +intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; u32 temp_lo, temp_hi = 0; u64 mchbar_addr; @@ -421,9 +417,8 @@ intel_alloc_mchbar_resource(struct drm_device *dev) /* Setup MCHBAR if possible, return true if we should disable it again */ static void -intel_setup_mchbar(struct drm_device *dev) +intel_setup_mchbar(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; u32 temp; bool enabled; @@ -445,7 +440,7 @@ intel_setup_mchbar(struct drm_device *dev) if (enabled) return; - if (intel_alloc_mchbar_resource(dev)) + if (intel_alloc_mchbar_resource(dev_priv)) return; dev_priv->mchbar_need_disable = true; @@ -461,9 +456,8 @@ intel_setup_mchbar(struct drm_device *dev) } static void -intel_teardown_mchbar(struct drm_device *dev) +intel_teardown_mchbar(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915; if (dev_priv->mchbar_need_disable) { @@ -493,9 +487,9 @@ intel_teardown_mchbar(struct drm_device *dev) /* true = enable decode, false = disable decoder */ static unsigned int i915_vga_set_decode(void *cookie, bool state) { - struct drm_device *dev = cookie; + struct drm_i915_private *dev_priv = cookie; - intel_modeset_vga_set_state(to_i915(dev), state); + intel_modeset_vga_set_state(dev_priv, state); if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; @@ -574,7 +568,7 @@ static int i915_load_modeset_init(struct drm_device *dev) * then we do not take part in VGA arbitration and the * vga_client_register() fails with -ENODEV. 
*/ - ret = vga_client_register(pdev, dev, NULL, i915_vga_set_decode); + ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode); if (ret && ret != -ENODEV) goto out; @@ -823,7 +817,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, goto err_workqueues; /* This must be called before any calls to HAS_PCH_* */ - intel_detect_pch(&dev_priv->drm); + intel_detect_pch(dev_priv); intel_pm_setup(&dev_priv->drm); intel_init_dpio(dev_priv); @@ -865,9 +859,8 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) i915_workqueues_cleanup(dev_priv); } -static int i915_mmio_setup(struct drm_device *dev) +static int i915_mmio_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; int mmio_bar; int mmio_size; @@ -893,17 +886,16 @@ static int i915_mmio_setup(struct drm_device *dev) } /* Try to make sure MCHBAR is enabled before poking at it */ - intel_setup_mchbar(dev); + intel_setup_mchbar(dev_priv); return 0; } -static void i915_mmio_cleanup(struct drm_device *dev) +static void i915_mmio_cleanup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; - intel_teardown_mchbar(dev); + intel_teardown_mchbar(dev_priv); pci_iounmap(pdev, dev_priv->regs); } @@ -918,16 +910,15 @@ static void i915_mmio_cleanup(struct drm_device *dev) */ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) { - struct drm_device *dev = &dev_priv->drm; int ret; if (i915_inject_load_failure()) return -ENODEV; - if (i915_get_bridge_dev(dev)) + if (i915_get_bridge_dev(dev_priv)) return -EIO; - ret = i915_mmio_setup(dev); + ret = i915_mmio_setup(dev_priv); if (ret < 0) goto put_bridge; @@ -947,10 +938,8 @@ put_bridge: */ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) { - struct drm_device *dev = &dev_priv->drm; - intel_uncore_fini(dev_priv); - i915_mmio_cleanup(dev); + i915_mmio_cleanup(dev_priv); pci_dev_put(dev_priv->bridge_dev); } -- cgit v1.2.3 From 7f26cb88014a748d03ecc0dcb6a5876f311b823d Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Dec 2016 14:16:41 +0000 Subject: drm/i915: Unexport VGA switcheroo functions They are only used in i915_drv.c so a forward declaration is enough. 
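Editor's note: a minimal, self-contained illustration of the pattern in this patch, using made-up function names rather than the driver's: a symbol used in only one translation unit becomes static, and a forward declaration near the top of the file lets callers that appear earlier keep calling it without an exported prototype in the header.

/*
 * Editor's sketch, not the driver's switcheroo code.
 */
#include <stdio.h>

static int resume_device(void);	/* forward declaration replaces the old exported prototype */

static void switcheroo_set_state(int power_on)
{
	if (power_on)
		resume_device();	/* usable before the definition below */
}

static int resume_device(void)
{
	printf("device resumed\n");
	return 0;
}

int main(void)
{
	switcheroo_set_state(1);
	return 0;
}
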
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.c | 5 ++++- drivers/gpu/drm/i915/i915_drv.h | 3 --- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 1312cd5e4465..362c8baef640 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -497,6 +497,9 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state) return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } +static int i915_resume_switcheroo(struct drm_device *dev); +static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); + static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); @@ -1710,7 +1713,7 @@ out: return ret; } -int i915_resume_switcheroo(struct drm_device *dev) +static int i915_resume_switcheroo(struct drm_device *dev) { int ret; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 20bc0aef656a..6cb4bb349c89 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2759,9 +2759,6 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) return false; } -extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); -extern int i915_resume_switcheroo(struct drm_device *dev); - int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, int enable_ppgtt); -- cgit v1.2.3 From 401964465f04f3c01dd7ddfe6b5149bc33eaa509 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Dec 2016 14:16:42 +0000 Subject: drm/i915: Make gmbus setup take dev_priv Simplify the code by passing the right argument in. v2: Commit message. (Joonas Lahtinen) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.c | 4 ++-- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_i2c.c | 8 +++----- 4 files changed, 8 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 362c8baef640..fee0b46a04bc 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -592,7 +592,7 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_csr; - intel_setup_gmbus(dev); + intel_setup_gmbus(dev_priv); /* Important: The output setup functions called by modeset_init need * working irqs for e.g. gmbus and dp aux transfers. 
*/ @@ -629,7 +629,7 @@ cleanup_gem: cleanup_irq: intel_guc_fini(dev_priv); drm_irq_uninstall(dev); - intel_teardown_gmbus(dev); + intel_teardown_gmbus(dev_priv); cleanup_csr: intel_csr_ucode_fini(dev_priv); intel_power_domains_fini(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6cb4bb349c89..44aecad47b21 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3478,8 +3478,8 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv); void i915_teardown_sysfs(struct drm_i915_private *dev_priv); /* intel_i2c.c */ -extern int intel_setup_gmbus(struct drm_device *dev); -extern void intel_teardown_gmbus(struct drm_device *dev); +extern int intel_setup_gmbus(struct drm_i915_private *dev_priv); +extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv); extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, unsigned int pin); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b91e0abf66ba..e5573a3ca704 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -17162,7 +17162,7 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_cleanup_gt_powersave(dev_priv); - intel_teardown_gmbus(dev); + intel_teardown_gmbus(dev_priv); } void intel_connector_attach_encoder(struct intel_connector *connector, diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 83f260bb4eef..85faba415633 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -617,11 +617,10 @@ static const struct i2c_algorithm gmbus_algorithm = { /** * intel_gmbus_setup - instantiate all Intel i2c GMBuses - * @dev: DRM device + * @dev_priv: i915 device private */ -int intel_setup_gmbus(struct drm_device *dev) +int intel_setup_gmbus(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; struct intel_gmbus *bus; unsigned int pin; @@ -724,9 +723,8 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) mutex_unlock(&dev_priv->gmbus_mutex); } -void intel_teardown_gmbus(struct drm_device *dev) +void intel_teardown_gmbus(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_gmbus *bus; unsigned int pin; -- cgit v1.2.3 From 12ff05e750dff5ae098e8b2803363d9707214735 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Dec 2016 14:16:43 +0000 Subject: drm/i915: Make i915_destroy_error_state take dev_priv Since it does not need dev at all. Also change the stored pointer in struct i915_error_state_file_priv to i915. 
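Editor's note: a self-contained sketch, with made-up types and fields, of the second half of this change: once the file-private struct keeps the drm_i915_private pointer instead of a drm_device pointer, users such as the destroy path take dev_priv directly and no longer need a to_i915()-style conversion.

/*
 * Editor's sketch, not the real error-state code.
 */
#include <stdio.h>

struct drm_i915_private { int error_count; };

/* after this patch the file-private struct keeps dev_priv directly */
struct i915_error_state_file_priv {
	struct drm_i915_private *i915;	/* was: struct drm_device *dev */
};

static void destroy_error_state(struct drm_i915_private *dev_priv)
{
	dev_priv->error_count = 0;	/* stand-in for freeing the captured state */
}

int main(void)
{
	struct drm_i915_private i915 = { .error_count = 3 };
	struct i915_error_state_file_priv priv = { .i915 = &i915 };

	destroy_error_state(priv.i915);	/* no conversion from a drm_device needed */
	printf("error_count=%d\n", i915.error_count);
	return 0;
}
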
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_debugfs.c | 8 ++++---- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 6 +++--- drivers/gpu/drm/i915/i915_gpu_error.c | 5 ++--- drivers/gpu/drm/i915/i915_sysfs.c | 4 ++-- 5 files changed, 12 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d179eae1b1f6..567980833d76 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -946,7 +946,7 @@ i915_error_state_write(struct file *filp, struct i915_error_state_file_priv *error_priv = filp->private_data; DRM_DEBUG_DRIVER("Resetting error state\n"); - i915_destroy_error_state(error_priv->dev); + i915_destroy_error_state(error_priv->i915); return cnt; } @@ -960,7 +960,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file) if (!error_priv) return -ENOMEM; - error_priv->dev = &dev_priv->drm; + error_priv->i915 = dev_priv; i915_error_state_get(&dev_priv->drm, error_priv); @@ -988,8 +988,8 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, ssize_t ret_count = 0; int ret; - ret = i915_error_state_buf_init(&error_str, - to_i915(error_priv->dev), count, *pos); + ret = i915_error_state_buf_init(&error_str, error_priv->i915, + count, *pos); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index fee0b46a04bc..809315cad316 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1307,7 +1307,7 @@ void i915_driver_unload(struct drm_device *dev) /* Free error state after interrupts are fully disabled. */ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); - i915_destroy_error_state(dev); + i915_destroy_error_state(dev_priv); /* Flush any outstanding unpin_work. 
*/ drain_workqueue(dev_priv->wq); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 44aecad47b21..2006ea033354 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1459,7 +1459,7 @@ struct drm_i915_error_state_buf { }; struct i915_error_state_file_priv { - struct drm_device *dev; + struct drm_i915_private *i915; struct drm_i915_error_state *error; }; @@ -3434,7 +3434,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv, void i915_error_state_get(struct drm_device *dev, struct i915_error_state_file_priv *error_priv); void i915_error_state_put(struct i915_error_state_file_priv *error_priv); -void i915_destroy_error_state(struct drm_device *dev); +void i915_destroy_error_state(struct drm_i915_private *dev_priv); #else @@ -3444,7 +3444,7 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv, { } -static inline void i915_destroy_error_state(struct drm_device *dev) +static inline void i915_destroy_error_state(struct drm_i915_private *dev_priv) { } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 82458ea60150..a14f7badc337 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -514,7 +514,7 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m, int i915_error_state_to_str(struct drm_i915_error_state_buf *m, const struct i915_error_state_file_priv *error_priv) { - struct drm_i915_private *dev_priv = to_i915(error_priv->dev); + struct drm_i915_private *dev_priv = error_priv->i915; struct pci_dev *pdev = dev_priv->drm.pdev; struct drm_i915_error_state *error = error_priv->error; struct drm_i915_error_object *obj; @@ -1644,9 +1644,8 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv) kref_put(&error_priv->error->ref, i915_error_state_free); } -void i915_destroy_error_state(struct drm_device *dev) +void i915_destroy_error_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_error_state *error; spin_lock_irq(&dev_priv->gpu_error.lock); diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 47590ab08d7e..b99fd9668317 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -535,7 +535,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, if (ret) return ret; - error_priv.dev = dev; + error_priv.i915 = dev_priv; i915_error_state_get(dev, &error_priv); ret = i915_error_state_to_str(&error_str, &error_priv); @@ -560,7 +560,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj, struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); DRM_DEBUG_DRIVER("Resetting error state\n"); - i915_destroy_error_state(&dev_priv->drm); + i915_destroy_error_state(dev_priv); return count; } -- cgit v1.2.3 From af6dc7425b359519cdc9399b19029a037827dd21 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Dec 2016 14:16:44 +0000 Subject: drm/i915: Make i915_save/restore_state and intel_i2c_reset take dev_priv dev_priv is more appropriate since it is used much more in these. v2: Commit message and keep the local pdev variable. 
(Joonas Lahtinen) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.c | 4 ++-- drivers/gpu/drm/i915/i915_drv.h | 6 +++--- drivers/gpu/drm/i915/i915_suspend.c | 16 +++++++--------- drivers/gpu/drm/i915/intel_i2c.c | 8 +++----- 4 files changed, 15 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 809315cad316..bd8994d02813 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1439,7 +1439,7 @@ static int i915_drm_suspend(struct drm_device *dev) i915_gem_suspend_gtt_mappings(dev_priv); - i915_save_state(dev); + i915_save_state(dev_priv); opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; intel_opregion_notify_adapter(dev_priv, opregion_target_state); @@ -1562,7 +1562,7 @@ static int i915_drm_resume(struct drm_device *dev) i915_gem_resume(dev_priv); - i915_restore_state(dev); + i915_restore_state(dev_priv); intel_pps_unlock_regs_wa(dev_priv); intel_opregion_setup(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2006ea033354..90ef5a09b40d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3470,8 +3470,8 @@ extern void i915_perf_register(struct drm_i915_private *dev_priv); extern void i915_perf_unregister(struct drm_i915_private *dev_priv); /* i915_suspend.c */ -extern int i915_save_state(struct drm_device *dev); -extern int i915_restore_state(struct drm_device *dev); +extern int i915_save_state(struct drm_i915_private *dev_priv); +extern int i915_restore_state(struct drm_i915_private *dev_priv); /* i915_sysfs.c */ void i915_setup_sysfs(struct drm_i915_private *dev_priv); @@ -3491,7 +3491,7 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) { return container_of(adapter, struct intel_gmbus, adapter)->force_bit; } -extern void intel_i2c_reset(struct drm_device *dev); +extern void intel_i2c_reset(struct drm_i915_private *dev_priv); /* intel_bios.c */ int intel_bios_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index b0e1e7ca75da..5c86925a0294 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -56,13 +56,12 @@ static void i915_restore_display(struct drm_i915_private *dev_priv) i915_redisable_vga(dev_priv); } -int i915_save_state(struct drm_device *dev) +int i915_save_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; int i; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); i915_save_display(dev_priv); @@ -97,18 +96,17 @@ int i915_save_state(struct drm_device *dev) dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i)); } - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); return 0; } -int i915_restore_state(struct drm_device *dev) +int i915_restore_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; int i; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->drm.struct_mutex); i915_gem_restore_fences(dev_priv); @@ -145,9 +143,9 @@ int i915_restore_state(struct drm_device *dev) I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]); } - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->drm.struct_mutex); - intel_i2c_reset(dev); + 
intel_i2c_reset(dev_priv); return 0; } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 85faba415633..62fe529516b1 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -111,10 +111,8 @@ to_intel_gmbus(struct i2c_adapter *i2c) } void -intel_i2c_reset(struct drm_device *dev) +intel_i2c_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - I915_WRITE(GMBUS0, 0); I915_WRITE(GMBUS4, 0); } @@ -211,7 +209,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter) adapter); struct drm_i915_private *dev_priv = bus->dev_priv; - intel_i2c_reset(&dev_priv->drm); + intel_i2c_reset(dev_priv); intel_i2c_quirk_set(dev_priv, true); set_data(bus, 1); set_clock(bus, 1); @@ -677,7 +675,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv) goto err; } - intel_i2c_reset(&dev_priv->drm); + intel_i2c_reset(dev_priv); return 0; -- cgit v1.2.3 From 192aa18142b28fdcb63b12984e02466ced382a54 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Dec 2016 14:16:45 +0000 Subject: drm/i915: Make intel_pm_setup take dev_priv Function actually wants dev_priv so give it to it. v2: Commit message. (Joonas Lahtinen) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/1480601805-14391-2-git-send-email-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/intel_drv.h | 2 +- drivers/gpu/drm/i915/intel_pm.c | 4 +--- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index bd8994d02813..ace9be8f6ff9 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -822,7 +822,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev_priv); - intel_pm_setup(&dev_priv->drm); + intel_pm_setup(dev_priv); intel_init_dpio(dev_priv); intel_power_domains_init(dev_priv); intel_irq_init(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 460f3742ebd7..1d126c29598f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1724,7 +1724,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv); void intel_update_watermarks(struct intel_crtc *crtc); void intel_init_pm(struct drm_i915_private *dev_priv); void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); -void intel_pm_setup(struct drm_device *dev); +void intel_pm_setup(struct drm_i915_private *dev_priv); void intel_gpu_ips_init(struct drm_i915_private *dev_priv); void intel_gpu_ips_teardown(void); void intel_init_gt_powersave(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 70108aa2309c..adf208fc6a2d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7955,10 +7955,8 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req) queue_work(req->i915->wq, &boost->work); } -void intel_pm_setup(struct drm_device *dev) +void intel_pm_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - mutex_init(&dev_priv->rps.hw_lock); spin_lock_init(&dev_priv->rps.client_lock); -- cgit v1.2.3 From 93de056babc5f0e3df04f0c3cf29e54204d0e65d Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira 
Date: Tue, 29 Nov 2016 13:48:47 +0200 Subject: drm/i915: Fix intel_psr_init() kerneldoc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit c39055b072f8 ("drm/i915: Pass dev_priv to intel_setup_outputs()"), I forgot to update the kerneldoc for intel_psr_init() init, leading to warnings when building the documentation: drivers/gpu/drm/i915/intel_psr.c:822: warning: No description found for parameter 'dev_priv' drivers/gpu/drm/i915/intel_psr.c:822: warning: Excess function parameter 'dev' description in 'intel_psr_init' Fixes: c39055b072f8 ("drm/i915: Pass dev_priv to intel_setup_outputs()") Cc: Ander Conselvan de Oliveira Cc: Ville Syrjälä Cc: Daniel Vetter Cc: Jani Nikula Cc: intel-gfx@lists.freedesktop.org Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1480420127-11382-1-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/intel_psr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 5c3616e54577..d5f8d03c6583 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -813,7 +813,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, /** * intel_psr_init - Init basic PSR work and mutex. - * @dev: DRM device + * @dev_priv: i915 device private * * This function is called only once at driver load to initialize basic * PSR stuff. -- cgit v1.2.3 From b6ea8b4a8ef80626dce698041577321662d9d47a Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 2 Dec 2016 08:43:53 +0000 Subject: drm/i915: Fix kerneldoc for intel_guc_fini Recent refactoring forgot to update this one. Signed-off-by: Tvrtko Ursulin Fixes: bf9e8429ab97 ("drm/i915: Make various init functions take dev_priv") Cc: Tvrtko Ursulin Cc: Chris Wilson Cc: Joonas Lahtinen Cc: Daniel Vetter Cc: Jani Nikula Cc: intel-gfx@lists.freedesktop.org Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1480668233-25372-1-git-send-email-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/intel_guc_loader.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 260ffe5e693b..21db69705458 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -773,7 +773,7 @@ void intel_guc_init(struct drm_i915_private *dev_priv) /** * intel_guc_fini() - clean up all allocated resources - * @dev: drm device + * @dev_priv: i915 device private */ void intel_guc_fini(struct drm_i915_private *dev_priv) { -- cgit v1.2.3 From a9a251c2aea5aaeb004eac4e6c2b86383347ce9f Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Fri, 2 Dec 2016 10:24:11 +0000 Subject: drm/i915: make i915_suspend_switcheroo static Looks like this was missed when unexporting, so let's keep sparse happy. 
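(For reference: sparse flags symbols that have external linkage but no declaration in scope, typically as

    warning: symbol 'i915_suspend_switcheroo' was not declared. Should it be static?

so once the last external caller was removed by 7f26cb88014a, marking the function static is the expected fix.)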
Cc: Tvrtko Ursulin Fixes: 7f26cb88014a ("drm/i915: Unexport VGA switcheroo functions") Signed-off-by: Matthew Auld Reviewed-by: Tvrtko Ursulin Signed-off-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161202102411.19831-1-matthew.auld@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ace9be8f6ff9..ae583c79c19f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1522,7 +1522,7 @@ out: return ret; } -int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) +static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) { int error; -- cgit v1.2.3 From 31613268c0a6f7abdb0c19487a084249bcf203ba Mon Sep 17 00:00:00 2001 From: Libin Yang Date: Thu, 1 Dec 2016 13:17:18 +0800 Subject: drm/i915/audio: extend get_saved_enc() to support more scenarios In initialization, audio driver will call functions get_eld() and etc. But at that time, audio driver may not know whether it is DP MST or not. In the original function get_saved_enc(), if it is DP MST, it requires to set the pipe to the correct value, otherwise, pipe to be -1. Although audio driver can get the knowledge whether it is in DP MST mode or not by reading the codec register. It will drop performance each time before it calls the get_eld and other similar functions. As gfx driver can easily know whether it is in DP MST mode or not. Let's extend the get_saved_enc() function to handle the situation that audio driver still sends the device id info even it is in DP SST mode and return the correct intel_encoder instead of panic. Signed-off-by: Libin Yang Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/1480569439-54252-1-git-send-email-libin.yang@intel.com --- drivers/gpu/drm/i915/intel_audio.c | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 49f10538d4aa..c8a1345d8347 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -737,25 +737,49 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev) return dev_priv->cdclk_freq; } +/* + * get the intel_encoder according to the parameter port and pipe + * intel_encoder is saved by the index of pipe + * MST & (pipe >= 0): return the av_enc_map[pipe], + * when port is matched + * MST & (pipe < 0): this is invalid + * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry) + * will get the right intel_encoder with port matched + * Non-MST & (pipe < 0): get the right intel_encoder with port matched + */ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, int port, int pipe) { + struct intel_encoder *encoder; if (WARN_ON(pipe >= I915_MAX_PIPES)) return NULL; /* MST */ - if (pipe >= 0) - return dev_priv->av_enc_map[pipe]; + if (pipe >= 0) { + encoder = dev_priv->av_enc_map[pipe]; + /* + * when bootup, audio driver may not know it is + * MST or not. 
So it will poll all the port & pipe + * combinations + */ + if (encoder != NULL && encoder->port == port && + encoder->type == INTEL_OUTPUT_DP_MST) + return encoder; + } /* Non-MST */ - for_each_pipe(dev_priv, pipe) { - struct intel_encoder *encoder; + if (pipe > 0) + return NULL; + for_each_pipe(dev_priv, pipe) { encoder = dev_priv->av_enc_map[pipe]; if (encoder == NULL) continue; + if (encoder->type == INTEL_OUTPUT_DP_MST) + continue; + if (port == encoder->port) return encoder; } -- cgit v1.2.3 From f55d23be11ed15f493957246f3b81fc530e79d70 Mon Sep 17 00:00:00 2001 From: Libin Yang Date: Thu, 1 Dec 2016 13:17:19 +0800 Subject: drm/i915/audio: extend audio sync rate support for DP MST Remove the type judgement in i915_audio_component_sync_audio_rate(). Audio rate sync is necessary for all i915 digital audio now. Signed-off-by: Libin Yang Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/1480569439-54252-2-git-send-email-libin.yang@intel.com --- drivers/gpu/drm/i915/intel_audio.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index c8a1345d8347..3bbc96c1767f 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -805,9 +805,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, /* 1. get the pipe */ intel_encoder = get_saved_enc(dev_priv, port, pipe); - if (!intel_encoder || !intel_encoder->base.crtc || - (intel_encoder->type != INTEL_OUTPUT_HDMI && - intel_encoder->type != INTEL_OUTPUT_DP)) { + if (!intel_encoder || !intel_encoder->base.crtc) { DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port)); err = -ENODEV; goto unlock; -- cgit v1.2.3 From 8bf41b7298b3b20de3f4a4e70ab58042648f963e Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 28 Nov 2016 17:29:27 +0200 Subject: drm/i915/gen6+: Clear upper data byte during PCODE write MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The spec calls for the upper data byte to be cleared before most of the PCODE write commands, for others like IPS control it doesn't say anything about this byte. Let's clear it in case it's clobbered somehow, especially that there are places where we only do a PCODE write without a preceding PCODE read. Cc: Ville Syrjälä Signed-off-by: Imre Deak Reviewed-by: Chris Wilson Reviewed-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480346969-16121-1-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index adf208fc6a2d..b7fa1fa7d669 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7838,6 +7838,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, } I915_WRITE_FW(GEN6_PCODE_DATA, val); + I915_WRITE_FW(GEN6_PCODE_DATA1, 0); I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); if (intel_wait_for_register_fw(dev_priv, -- cgit v1.2.3 From cc3f90f0633c5f08044ba898e3fbf942d2e26cb3 Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Fri, 2 Dec 2016 10:23:49 +0200 Subject: drm/i915/glk: Reuse broxton code for geminilake Geminilake is mostly backwards compatible with broxton, so change most of the IS_BROXTON() checks to IS_GEN9_LP(). Differences between the platforms will be implemented in follow-up patches. 
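As a point of reference (not part of this series), IS_GEN9_LP() is simply the predicate for the low-power gen9 parts; a semantically equivalent sketch would be

    /* Sketch only -- the real definition lives in i915_drv.h and may differ. */
    #define IS_GEN9_LP(dev_priv) \
    	(IS_GEN9(dev_priv) && \
    	 (IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)))

which is why every IS_BROXTON() site converted below picks up Geminilake for free.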
v2: Don't reuse broxton's path in intel_update_max_cdclk(). Don't set plane count as in broxton. v3: Rebase v4: Include the check intel_bios_is_port_hpd_inverted(). Commit message. v5: Leave i915_dmc_info() out; glk's csr version != bxt's. (Rodrigo) v6: Rebase. v7: Convert a few mode IS_BROXTON() occurances in pps, ddi, dsi and pll code. (Rodrigo) v8: Squash a couple of DDI patches with more conversions. (Rodrigo) Cc: Rodrigo Vivi Reviewed-by: Rodrigo Vivi Signed-off-by: Ander Conselvan de Oliveira Link: http://patchwork.freedesktop.org/patch/msgid/1480667037-11215-2-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 10 +++++----- drivers/gpu/drm/i915/i915_gem_gtt.c | 8 ++++---- drivers/gpu/drm/i915/i915_irq.c | 10 +++++----- drivers/gpu/drm/i915/i915_reg.h | 4 ++-- drivers/gpu/drm/i915/intel_bios.c | 2 +- drivers/gpu/drm/i915/intel_ddi.c | 26 +++++++++++++------------- drivers/gpu/drm/i915/intel_display.c | 14 +++++++------- drivers/gpu/drm/i915/intel_dp.c | 24 ++++++++++++------------ drivers/gpu/drm/i915/intel_dpio_phy.c | 1 - drivers/gpu/drm/i915/intel_dpll_mgr.c | 2 +- drivers/gpu/drm/i915/intel_dsi.c | 30 +++++++++++++++--------------- drivers/gpu/drm/i915/intel_dsi_pll.c | 12 ++++++------ drivers/gpu/drm/i915/intel_hdmi.c | 6 +++--- drivers/gpu/drm/i915/intel_i2c.c | 4 ++-- drivers/gpu/drm/i915/intel_mocs.c | 2 +- drivers/gpu/drm/i915/intel_panel.c | 2 +- drivers/gpu/drm/i915/intel_pm.c | 6 +++--- drivers/gpu/drm/i915/intel_runtime_pm.c | 6 +++--- 18 files changed, 84 insertions(+), 85 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 567980833d76..b906950c4f65 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1108,7 +1108,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) int max_freq; rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { rp_state_cap = I915_READ(BXT_RP_STATE_CAP); gt_perf_status = I915_READ(BXT_GT_PERF_STATUS); } else { @@ -1204,7 +1204,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "Down threshold: %d%%\n", dev_priv->rps.down_threshold); - max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 0 : + max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 : rp_state_cap >> 16) & 0xff; max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1); @@ -1217,7 +1217,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", intel_gpu_freq(dev_priv, max_freq)); - max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 16 : + max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 : rp_state_cap >> 0) & 0xff; max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1); @@ -5180,7 +5180,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; /* BXT has a single slice and at most 3 subslices. 
*/ - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { s_max = 1; ss_max = 3; } @@ -5214,7 +5214,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, for (ss = 0; ss < ss_max; ss++) { unsigned int eu_cnt; - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) /* skip disabled subslice */ continue; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 02fb063302bf..c0042471ad87 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -372,7 +372,7 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr) /* There are only few exceptions for gen >=6. chv and bxt. * And we are not sure about the latter so play safe for now. */ - if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv)) + if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) drm_clflush_virt_range(vaddr, PAGE_SIZE); kunmap_atomic(vaddr); @@ -2939,7 +2939,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) * resort to an uncached mapping. The WC issue is easily caught by the * readback check when writing GTT PTE entries. */ - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) ggtt->gsm = ioremap_nocache(phys_addr, size); else ggtt->gsm = ioremap_wc(phys_addr, size); @@ -3069,7 +3069,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT; - if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv)) + if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) chv_setup_private_ppat(dev_priv); else bdw_setup_private_ppat(dev_priv); @@ -3309,7 +3309,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) ggtt->base.closed = false; if (INTEL_GEN(dev_priv) >= 8) { - if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv)) + if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) chv_setup_private_ppat(dev_priv); else bdw_setup_private_ppat(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 0b119b99cd9b..a0e70f5b3aad 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2435,7 +2435,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) found = true; } - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; if (tmp_mask) { bxt_hpd_irq_handler(dev_priv, tmp_mask, @@ -2451,7 +2451,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) } } - if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { + if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { gmbus_irq_handler(dev_priv); found = true; } @@ -3375,7 +3375,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) GEN9_DE_PIPE_IRQ_FAULT_ERRORS; de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D; - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) de_port_masked |= BXT_DE_PORT_GMBUS; } else { de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | @@ -3386,7 +3386,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) GEN8_PIPE_FIFO_UNDERRUN; de_port_enables = de_port_masked; - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; else if (IS_BROADWELL(dev_priv)) de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; @@ -4211,7 +4211,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->irq_uninstall = gen8_irq_uninstall; dev->driver->enable_vblank = 
gen8_enable_vblank; dev->driver->disable_vblank = gen8_disable_vblank; - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6747d6864aaf..1dec2072a9e6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3256,7 +3256,7 @@ enum skl_disp_power_wells { #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) #define INTERVAL_0_833_US(us) (((us) * 6) / 5) #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ - (IS_BROXTON(dev_priv) ? \ + (IS_GEN9_LP(dev_priv) ? \ INTERVAL_0_833_US(us) : \ INTERVAL_1_33_US(us)) : \ INTERVAL_1_28_US(us)) @@ -3265,7 +3265,7 @@ enum skl_disp_power_wells { #define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3) #define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6) #define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \ - (IS_BROXTON(dev_priv) ? \ + (IS_GEN9_LP(dev_priv) ? \ INTERVAL_0_833_TO_US(interval) : \ INTERVAL_1_33_TO_US(interval)) : \ INTERVAL_1_28_TO_US(interval)) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 7ffab1abc518..eaade27af386 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1779,7 +1779,7 @@ intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, { int i; - if (WARN_ON_ONCE(!IS_BROXTON(dev_priv))) + if (WARN_ON_ONCE(!IS_GEN9_LP(dev_priv))) return false; for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 8b47efa37e47..d808a2ccc29e 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -442,7 +442,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) return hdmi_level; if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { @@ -484,7 +484,7 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder) const struct ddi_buf_trans *ddi_translations_edp; const struct ddi_buf_trans *ddi_translations; - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) return; if (IS_KABYLAKE(dev_priv)) { @@ -567,7 +567,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder) enum port port = intel_ddi_get_encoder_port(encoder); const struct ddi_buf_trans *ddi_translations_hdmi; - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) return; hdmi_level = intel_ddi_hdmi_level(dev_priv, port); @@ -1091,7 +1091,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder, hsw_ddi_clock_get(encoder, pipe_config); else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) skl_ddi_clock_get(encoder, pipe_config); - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) bxt_ddi_clock_get(encoder, pipe_config); } @@ -1153,7 +1153,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc, if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) return skl_ddi_pll_select(intel_crtc, crtc_state, intel_encoder); - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) return bxt_ddi_pll_select(intel_crtc, crtc_state, intel_encoder); else @@ -1429,7 +1429,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, DRM_DEBUG_KMS("No pipe for ddi port %c found\n", 
port_name(port)); out: - if (ret && IS_BROXTON(dev_priv)) { + if (ret && IS_GEN9_LP(dev_priv)) { tmp = I915_READ(BXT_PHY_CTL(port)); if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) @@ -1643,7 +1643,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) skl_ddi_set_iboost(encoder, level); - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); return DDI_BUF_TRANS_SELECT(level); @@ -1716,7 +1716,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, intel_prepare_hdmi_ddi_buffers(encoder); if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) skl_ddi_set_iboost(encoder, level); - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) bxt_ddi_vswing_sequence(dev_priv, level, port, INTEL_OUTPUT_HDMI); @@ -2053,7 +2053,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, intel_ddi_clock_get(encoder, pipe_config); - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) pipe_config->lane_lat_optim_mask = bxt_ddi_phy_get_lane_lat_optim_mask(encoder); } @@ -2077,7 +2077,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, else ret = intel_dp_compute_config(encoder, pipe_config, conn_state); - if (IS_BROXTON(dev_priv) && ret) + if (IS_GEN9_LP(dev_priv) && ret) pipe_config->lane_lat_optim_mask = bxt_ddi_phy_calc_lane_lat_optim_mask(encoder, pipe_config->lane_count); @@ -2137,7 +2137,7 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock) struct intel_shared_dpll_config tmp_pll_config; enum intel_dpll_id dpll_id; - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { dpll_id = (enum intel_dpll_id)dig_port->port; /* * Select the required PLL. This works for platforms where @@ -2233,7 +2233,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_encoder->compute_config = intel_ddi_compute_config; intel_encoder->enable = intel_enable_ddi; - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable; intel_encoder->pre_enable = intel_ddi_pre_enable; intel_encoder->disable = intel_disable_ddi; @@ -2254,7 +2254,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) * configuration so that we use the proper lane count for our * calculations. 
*/ - if (IS_BROXTON(dev_priv) && port == PORT_A) { + if (IS_GEN9_LP(dev_priv) && port == PORT_A) { if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) { DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n"); intel_dig_port->saved_port_bits |= DDI_A_4_LANES; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e5573a3ca704..43f727bbe4fd 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -614,12 +614,12 @@ static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv, INTELPllInvalid("m1 out of range\n"); if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) && - !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv)) + !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv)) if (clock->m1 <= clock->m2) INTELPllInvalid("m1 <= m2\n"); if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && - !IS_BROXTON(dev_priv)) { + !IS_GEN9_LP(dev_priv)) { if (clock->p < limit->p.min || limit->p.max < clock->p) INTELPllInvalid("p out of range\n"); if (clock->m < limit->m.min || limit->m.max < clock->m) @@ -10640,7 +10640,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) skylake_get_ddi_pll(dev_priv, port, pipe_config); - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) bxt_get_ddi_pll(dev_priv, port, pipe_config); else haswell_get_ddi_pll(dev_priv, port, pipe_config); @@ -10685,7 +10685,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask); - if (IS_BROXTON(dev_priv) && + if (IS_GEN9_LP(dev_priv) && bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) { WARN_ON(active); active = true; @@ -12783,7 +12783,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, DRM_DEBUG_KMS("ips: %i, double wide: %i\n", pipe_config->ips_enabled, pipe_config->double_wide); - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n", @@ -15477,7 +15477,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) static void intel_pps_init(struct drm_i915_private *dev_priv) { - if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv)) + if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv)) dev_priv->pps_mmio_base = PCH_PPS_BASE; else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) dev_priv->pps_mmio_base = VLV_PPS_BASE; @@ -15504,7 +15504,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (intel_crt_present(dev_priv)) intel_crt_init(dev_priv); - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { /* * FIXME: Broxton doesn't support port detection via the * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 9dfbde472419..1f2420cbe06a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -233,7 +233,7 @@ intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates) struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); int size; - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { *source_rates = bxt_rates; size = ARRAY_SIZE(bxt_rates); } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { @@ -645,7 +645,7 @@ void intel_power_sequencer_reset(struct 
drm_i915_private *dev_priv) struct intel_encoder *encoder; if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && - !IS_BROXTON(dev_priv))) + !IS_GEN9_LP(dev_priv))) return; /* @@ -665,7 +665,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) continue; intel_dp = enc_to_intel_dp(&encoder->base); - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) intel_dp->pps_reset = true; else intel_dp->pps_pipe = INVALID_PIPE; @@ -688,7 +688,7 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv, memset(regs, 0, sizeof(*regs)); - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) pps_idx = bxt_power_sequencer_idx(intel_dp); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) pps_idx = vlv_power_sequencer_pipe(intel_dp); @@ -697,7 +697,7 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv, regs->pp_stat = PP_STATUS(pps_idx); regs->pp_on = PP_ON_DELAYS(pps_idx); regs->pp_off = PP_OFF_DELAYS(pps_idx); - if (!IS_BROXTON(dev_priv)) + if (!IS_GEN9_LP(dev_priv)) regs->pp_div = PP_DIVISOR(pps_idx); } @@ -2984,7 +2984,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); enum port port = dp_to_dig_port(intel_dp)->port; - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; else if (INTEL_GEN(dev_priv) >= 9) { if (dev_priv->vbt.edp.low_vswing && port == PORT_A) @@ -4300,7 +4300,7 @@ static bool intel_digital_port_connected(struct drm_i915_private *dev_priv, return ibx_digital_port_connected(dev_priv, port); else if (HAS_PCH_SPLIT(dev_priv)) return cpt_digital_port_connected(dev_priv, port); - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) return bxt_digital_port_connected(dev_priv, port); else if (IS_GM45(dev_priv)) return gm45_digital_port_connected(dev_priv, port); @@ -4929,7 +4929,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv, pp_on = I915_READ(regs.pp_on); pp_off = I915_READ(regs.pp_off); - if (!IS_BROXTON(dev_priv)) { + if (!IS_GEN9_LP(dev_priv)) { I915_WRITE(regs.pp_ctrl, pp_ctl); pp_div = I915_READ(regs.pp_div); } @@ -4947,7 +4947,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv, seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> PANEL_POWER_DOWN_DELAY_SHIFT; - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> BXT_POWER_CYCLE_DELAY_SHIFT; if (tmp > 0) @@ -5078,7 +5078,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); /* Compute the divisor for the pp clock, simply match the Bspec * formula. */ - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { pp_div = I915_READ(regs.pp_ctrl); pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000) @@ -5104,7 +5104,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, I915_WRITE(regs.pp_on, pp_on); I915_WRITE(regs.pp_off, pp_off); - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) I915_WRITE(regs.pp_ctrl, pp_div); else I915_WRITE(regs.pp_div, pp_div); @@ -5112,7 +5112,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", I915_READ(regs.pp_on), I915_READ(regs.pp_off), - IS_BROXTON(dev_priv) ? + IS_GEN9_LP(dev_priv) ? 
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) : I915_READ(regs.pp_div)); } diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c index 7a8e82dabbf2..8c62dea37ce9 100644 --- a/drivers/gpu/drm/i915/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c @@ -317,7 +317,6 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, if (bxt_ddi_phy_verify_state(dev_priv, phy)) { DRM_DEBUG_DRIVER("DDI PHY %d already enabled, " "won't reprogram it\n", phy); - return; } diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 58a756f2f224..976d39086d07 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -1860,7 +1860,7 @@ void intel_shared_dpll_init(struct drm_device *dev) if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) dpll_mgr = &skl_pll_mgr; - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) dpll_mgr = &bxt_pll_mgr; else if (HAS_DDI(dev_priv)) dpll_mgr = &hsw_pll_mgr; diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 3bc6213afd3e..0668bbec5028 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -340,7 +340,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, /* DSI uses short packets for sync events, so clear mode flags for DSI */ adjusted_mode->flags = 0; - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { /* Dual link goes to DSI transcoder A. */ if (intel_dsi->ports == BIT(PORT_C)) pipe_config->cpu_transcoder = TRANSCODER_DSI_C; @@ -441,7 +441,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_dsi_device_ready(encoder); - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) bxt_dsi_device_ready(encoder); } @@ -464,7 +464,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder) } for_each_dsi_port(port, intel_dsi->ports) { - i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ? + i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); u32 temp; @@ -497,7 +497,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) enum port port; for_each_dsi_port(port, intel_dsi->ports) { - i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ? + i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); u32 temp; @@ -666,7 +666,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) DRM_DEBUG_KMS("\n"); for_each_dsi_port(port, intel_dsi->ports) { /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ - i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ? + i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A); u32 val; @@ -758,12 +758,12 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, * configuration, otherwise accessing DSI registers will hang the * machine. See BSpec North Display Engine registers/MIPI[BXT]. */ - if (IS_BROXTON(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv)) + if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv)) goto out_put_power; /* XXX: this only works for one DSI output */ for_each_dsi_port(port, intel_dsi->ports) { - i915_reg_t ctrl_reg = IS_BROXTON(dev_priv) ? + i915_reg_t ctrl_reg = IS_GEN9_LP(dev_priv) ? 
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE; @@ -788,7 +788,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY)) continue; - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { u32 tmp = I915_READ(MIPI_CTRL(port)); tmp &= BXT_PIPE_SELECT_MASK; tmp >>= BXT_PIPE_SELECT_SHIFT; @@ -976,7 +976,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, u32 pclk; DRM_DEBUG_KMS("\n"); - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) bxt_dsi_get_pipe_config(encoder, pipe_config); pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp, @@ -1068,7 +1068,7 @@ static void set_dsi_timings(struct drm_encoder *encoder, hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio); for_each_dsi_port(port, intel_dsi->ports) { - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { /* * Program hdisplay and vdisplay on MIPI transcoder. * This is different from calculated hactive and @@ -1155,7 +1155,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, tmp &= ~READ_REQUEST_PRIORITY_MASK; I915_WRITE(MIPI_CTRL(port), tmp | READ_REQUEST_PRIORITY_HIGH); - } else if (IS_BROXTON(dev_priv)) { + } else if (IS_GEN9_LP(dev_priv)) { enum pipe pipe = intel_crtc->pipe; tmp = I915_READ(MIPI_CTRL(port)); @@ -1193,7 +1193,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, if (intel_dsi->clock_stop) tmp |= CLOCKSTOP; - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { tmp |= BXT_DPHY_DEFEATURE_EN; if (!is_cmd_mode(intel_dsi)) tmp |= BXT_DEFEATURE_DPI_FIFO_CTR; @@ -1244,7 +1244,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, I915_WRITE(MIPI_INIT_COUNT(port), txclkesc(intel_dsi->escape_clk_div, 100)); - if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) { + if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) { /* * BXT spec says write MIPI_INIT_COUNT for * both the ports, even if only one is @@ -1454,7 +1454,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { dev_priv->mipi_mmio_base = VLV_MIPI_BASE; - } else if (IS_BROXTON(dev_priv)) { + } else if (IS_GEN9_LP(dev_priv)) { dev_priv->mipi_mmio_base = BXT_MIPI_BASE; } else { DRM_ERROR("Unsupported Mipi device to reg base"); @@ -1495,7 +1495,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv) * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI * port C. BXT isn't limited like this. 
*/ - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); else if (port == PORT_A) intel_encoder->crtc_mask = BIT(PIPE_A); diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index 56eff6004bc0..cf8c1b0c30d6 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c @@ -351,7 +351,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, struct intel_crtc_state *config) { - if (IS_BROXTON(to_i915(encoder->base.dev))) + if (IS_GEN9_LP(to_i915(encoder->base.dev))) return bxt_dsi_get_pclk(encoder, pipe_bpp, config); else return vlv_dsi_get_pclk(encoder, pipe_bpp, config); @@ -504,7 +504,7 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder, bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) { - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) return bxt_dsi_pll_is_enabled(dev_priv); MISSING_CASE(INTEL_DEVID(dev_priv)); @@ -519,7 +519,7 @@ int intel_compute_dsi_pll(struct intel_encoder *encoder, if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) return vlv_compute_dsi_pll(encoder, config); - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) return bxt_compute_dsi_pll(encoder, config); return -ENODEV; @@ -532,7 +532,7 @@ void intel_enable_dsi_pll(struct intel_encoder *encoder, if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_enable_dsi_pll(encoder, config); - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) bxt_enable_dsi_pll(encoder, config); } @@ -542,7 +542,7 @@ void intel_disable_dsi_pll(struct intel_encoder *encoder) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_disable_dsi_pll(encoder); - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) bxt_disable_dsi_pll(encoder); } @@ -566,7 +566,7 @@ void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) bxt_dsi_reset_clocks(encoder, port); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_dsi_reset_clocks(encoder, port); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 374e38a4da43..0bcfead14571 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1251,7 +1251,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, return MODE_CLOCK_HIGH; /* BXT DPLL can't generate 223-240 MHz */ - if (IS_BROXTON(dev_priv) && clock > 223333 && clock < 240000) + if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000) return MODE_CLOCK_RANGE; /* CHV DPLL can't generate 216-240 MHz */ @@ -1809,13 +1809,13 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, switch (port) { case PORT_B: - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) ddc_pin = GMBUS_PIN_1_BXT; else ddc_pin = GMBUS_PIN_DPB; break; case PORT_C: - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) ddc_pin = GMBUS_PIN_2_BXT; else ddc_pin = GMBUS_PIN_DPC; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 62fe529516b1..0164130c0488 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -72,7 +72,7 @@ static const struct gmbus_pin gmbus_pins_bxt[] = { static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv, unsigned int pin) { - if 
(IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) return &gmbus_pins_bxt[pin]; else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) return &gmbus_pins_skl[pin]; @@ -87,7 +87,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, { unsigned int size; - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) size = ARRAY_SIZE(gmbus_pins_bxt); else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) size = ARRAY_SIZE(gmbus_pins_skl); diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 4f8829c0845e..c787fc4e6eb9 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -182,7 +182,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv, table->size = ARRAY_SIZE(skylake_mocs_table); table->table = skylake_mocs_table; result = true; - } else if (IS_BROXTON(dev_priv)) { + } else if (IS_GEN9_LP(dev_priv)) { table->size = ARRAY_SIZE(broxton_mocs_table); table->table = broxton_mocs_table; result = true; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 08ab6d762ca4..3578b402d412 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1756,7 +1756,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) intel_dsi_dcs_init_backlight_funcs(connector) == 0) return; - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { panel->backlight.setup = bxt_setup_backlight; panel->backlight.enable = bxt_enable_backlight; panel->backlight.disable = bxt_disable_backlight; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b7fa1fa7d669..bf94d68a9d0d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5208,7 +5208,7 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6) if (!enable_rc6) return 0; - if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) { + if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) { DRM_INFO("RC6 disabled by BIOS\n"); return 0; } @@ -5242,7 +5242,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) /* All of these values are in units of 50MHz */ /* static values from HW: RP0 > RP1 > RPn (min_freq) */ - if (IS_BROXTON(dev_priv)) { + if (IS_GEN9_LP(dev_priv)) { u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; @@ -7622,7 +7622,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) dev_priv->display.init_clock_gating = skylake_init_clock_gating; else if (IS_KABYLAKE(dev_priv)) dev_priv->display.init_clock_gating = kabylake_init_clock_gating; - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) dev_priv->display.init_clock_gating = bxt_init_clock_gating; else if (IS_BROADWELL(dev_priv)) dev_priv->display.init_clock_gating = broadwell_init_clock_gating; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 356c662ad453..66ab1c8afaf2 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -530,7 +530,7 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) u32 mask; mask = DC_STATE_EN_UPTO_DC5; - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) mask |= DC_STATE_EN_DC9; else mask |= DC_STATE_EN_UPTO_DC6; @@ -911,7 +911,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, gen9_assert_dbuf_enabled(dev_priv); - 
if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) bxt_verify_ddi_phy_power_wells(dev_priv); } @@ -2170,7 +2170,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { max_dc = 2; mask = 0; - } else if (IS_BROXTON(dev_priv)) { + } else if (IS_GEN9_LP(dev_priv)) { max_dc = 1; /* * DC9 has a separate HW flow from the rest of the DC states, -- cgit v1.2.3 From 0d03926de530057a15fe1ef735cb7f88716833cd Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Fri, 2 Dec 2016 10:23:50 +0200 Subject: drm/i915/glk: Add power wells for Geminilake Geminilake has power wells are similar to SKL, but with the misc IO well being split into separate AUX IO wells. Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1480667037-11215-3-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 6 ++ drivers/gpu/drm/i915/intel_runtime_pm.c | 114 +++++++++++++++++++++++++++++++- 2 files changed, 117 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1dec2072a9e6..35f78203f0c9 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1044,9 +1044,15 @@ enum skl_disp_power_wells { /* These numbers are fixed and must match the position of the pw bits */ SKL_DISP_PW_MISC_IO, SKL_DISP_PW_DDI_A_E, + GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E, SKL_DISP_PW_DDI_B, SKL_DISP_PW_DDI_C, SKL_DISP_PW_DDI_D, + + GLK_DISP_PW_AUX_A = 8, + GLK_DISP_PW_AUX_B, + GLK_DISP_PW_AUX_C, + SKL_DISP_PW_1 = 14, SKL_DISP_PW_2, diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 66ab1c8afaf2..49043fcf694f 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -453,6 +453,45 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, BIT(POWER_DOMAIN_AUX_C) | \ BIT(POWER_DOMAIN_INIT)) +#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_TRANSCODER_A) | \ + BIT(POWER_DOMAIN_PIPE_B) | \ + BIT(POWER_DOMAIN_TRANSCODER_B) | \ + BIT(POWER_DOMAIN_PIPE_C) | \ + BIT(POWER_DOMAIN_TRANSCODER_C) | \ + BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT(POWER_DOMAIN_AUX_B) | \ + BIT(POWER_DOMAIN_AUX_C) | \ + BIT(POWER_DOMAIN_AUDIO) | \ + BIT(POWER_DOMAIN_VGA) | \ + BIT(POWER_DOMAIN_INIT)) +#define GLK_DISPLAY_DDI_A_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ + BIT(POWER_DOMAIN_INIT)) +#define GLK_DISPLAY_DDI_B_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT(POWER_DOMAIN_INIT)) +#define GLK_DISPLAY_DDI_C_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT(POWER_DOMAIN_INIT)) +#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_AUX_A) | \ + BIT(POWER_DOMAIN_INIT)) +#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_AUX_B) | \ + BIT(POWER_DOMAIN_INIT)) +#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_AUX_C) | \ + BIT(POWER_DOMAIN_INIT)) +#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ + BIT(POWER_DOMAIN_MODESET) | \ + BIT(POWER_DOMAIN_AUX_A) | \ + BIT(POWER_DOMAIN_INIT)) + static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) { WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), @@ 
-694,7 +733,7 @@ gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv, } static void skl_set_power_well(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, bool enable) + struct i915_power_well *power_well, bool enable) { uint32_t tmp, fuse_status; uint32_t req_mask, state_mask; @@ -720,11 +759,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, return; } break; - case SKL_DISP_PW_DDI_A_E: + case SKL_DISP_PW_MISC_IO: + case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A */ case SKL_DISP_PW_DDI_B: case SKL_DISP_PW_DDI_C: case SKL_DISP_PW_DDI_D: - case SKL_DISP_PW_MISC_IO: + case GLK_DISP_PW_AUX_A: + case GLK_DISP_PW_AUX_B: + case GLK_DISP_PW_AUX_C: break; default: WARN(1, "Unknown power well %lu\n", power_well->id); @@ -2150,6 +2192,70 @@ static struct i915_power_well bxt_power_wells[] = { }, }; +static struct i915_power_well glk_power_wells[] = { + { + .name = "always-on", + .always_on = 1, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + }, + { + .name = "power well 1", + /* Handled by the DMC firmware */ + .domains = 0, + .ops = &skl_power_well_ops, + .id = SKL_DISP_PW_1, + }, + { + .name = "DC off", + .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = SKL_DISP_PW_DC_OFF, + }, + { + .name = "power well 2", + .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, + .ops = &skl_power_well_ops, + .id = SKL_DISP_PW_2, + }, + { + .name = "AUX A", + .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, + .ops = &skl_power_well_ops, + .id = GLK_DISP_PW_AUX_A, + }, + { + .name = "AUX B", + .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, + .ops = &skl_power_well_ops, + .id = GLK_DISP_PW_AUX_B, + }, + { + .name = "AUX C", + .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, + .ops = &skl_power_well_ops, + .id = GLK_DISP_PW_AUX_C, + }, + { + .name = "DDI A power well", + .domains = GLK_DISPLAY_DDI_A_POWER_DOMAINS, + .ops = &skl_power_well_ops, + .id = GLK_DISP_PW_DDI_A, + }, + { + .name = "DDI B power well", + .domains = GLK_DISPLAY_DDI_B_POWER_DOMAINS, + .ops = &skl_power_well_ops, + .id = SKL_DISP_PW_DDI_B, + }, + { + .name = "DDI C power well", + .domains = GLK_DISPLAY_DDI_C_POWER_DOMAINS, + .ops = &skl_power_well_ops, + .id = SKL_DISP_PW_DDI_C, + }, +}; + static int sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, int disable_power_well) @@ -2246,6 +2352,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) set_power_wells(power_domains, skl_power_wells); } else if (IS_BROXTON(dev_priv)) { set_power_wells(power_domains, bxt_power_wells); + } else if (IS_GEMINILAKE(dev_priv)) { + set_power_wells(power_domains, glk_power_wells); } else if (IS_CHERRYVIEW(dev_priv)) { set_power_wells(power_domains, chv_power_wells); } else if (IS_VALLEYVIEW(dev_priv)) { -- cgit v1.2.3 From 0a116ce895e7ee2831c6304df246c40a33bcf454 Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Fri, 2 Dec 2016 10:23:51 +0200 Subject: drm/i915/glk: Implement Geminilake DDI init sequence Implement the DDI initsequence and add information about the different phys in GLK. v2: Rebase on the move of phys to be power wells. v3: Rebase on addition of struct bxt_ddi_phy_info. 
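To illustrate the new interface (hypothetical caller, not part of the patch): with the per-platform PHY tables introduced here, code that programs a port resolves its PHY and channel through dev_priv, so Geminilake's three single-channel PHYs and Broxton's two PHYs are selected transparently:

    /* Hypothetical example -- resolve a port to (phy, ch) via dev_priv. */
    static void example_setup_port(struct drm_i915_private *dev_priv,
    				   enum port port)
    {
    	enum dpio_phy phy;
    	enum dpio_channel ch;

    	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
    	/* ... program the BXT_PORT_* registers for (phy, ch) ... */
    }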
Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1480667037-11215-4-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 5 +- drivers/gpu/drm/i915/i915_reg.h | 17 +++-- drivers/gpu/drm/i915/intel_dpio_phy.c | 114 +++++++++++++++++++++++++++----- drivers/gpu/drm/i915/intel_dpll_mgr.c | 4 +- drivers/gpu/drm/i915/intel_runtime_pm.c | 39 +++++++++++ 5 files changed, 155 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 90ef5a09b40d..035ac75dd1ed 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -234,7 +234,8 @@ enum dpio_channel { enum dpio_phy { DPIO_PHY0, - DPIO_PHY1 + DPIO_PHY1, + DPIO_PHY2, }; enum intel_display_power_domain { @@ -3619,7 +3620,7 @@ u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); /* intel_dpio_phy.c */ -void bxt_port_to_phy_channel(enum port port, +void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, enum dpio_phy *phy, enum dpio_channel *ch); void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, enum port port, u32 margin, u32 scale, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 35f78203f0c9..d1f0720c6ef3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -62,6 +62,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \ (port) == PORT_B ? (b) : (c)) #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c)) +#define _PHY3(phy, a, b, c) ((phy) == DPIO_PHY0 ? (a) : \ + (phy) == DPIO_PHY1 ? 
(b) : (c)) +#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) #define _MASKED_FIELD(mask, value) ({ \ if (__builtin_constant_p(mask)) \ @@ -1062,6 +1065,7 @@ enum skl_disp_power_wells { BXT_DPIO_CMN_A, BXT_DPIO_CMN_BC, + GLK_DPIO_CMN_C, }; #define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2)) @@ -1530,8 +1534,10 @@ enum skl_disp_power_wells { /* BXT PHY registers */ #define _BXT_PHY0_BASE 0x6C000 #define _BXT_PHY1_BASE 0x162000 -#define BXT_PHY_BASE(phy) _PIPE((phy), _BXT_PHY0_BASE, \ - _BXT_PHY1_BASE) +#define _BXT_PHY2_BASE 0x163000 +#define BXT_PHY_BASE(phy) _PHY3((phy), _BXT_PHY0_BASE, \ + _BXT_PHY1_BASE, \ + _BXT_PHY2_BASE) #define _BXT_PHY(phy, reg) \ _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg)) @@ -1543,7 +1549,6 @@ enum skl_disp_power_wells { _MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1)) #define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090) -#define GT_DISPLAY_POWER_ON(phy) (1 << (phy)) #define _BXT_PHY_CTL_DDI_A 0x64C00 #define _BXT_PHY_CTL_DDI_B 0x64C10 @@ -1556,9 +1561,11 @@ enum skl_disp_power_wells { #define _PHY_CTL_FAMILY_EDP 0x64C80 #define _PHY_CTL_FAMILY_DDI 0x64C90 +#define _PHY_CTL_FAMILY_DDI_C 0x64CA0 #define COMMON_RESET_DIS (1 << 31) -#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PIPE((phy), _PHY_CTL_FAMILY_DDI, \ - _PHY_CTL_FAMILY_EDP) +#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PHY3((phy), _PHY_CTL_FAMILY_DDI, \ + _PHY_CTL_FAMILY_EDP, \ + _PHY_CTL_FAMILY_DDI_C) /* BXT PHY PLL registers */ #define _PORT_PLL_A 0x46074 diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c index 8c62dea37ce9..46e38a05b3ac 100644 --- a/drivers/gpu/drm/i915/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c @@ -130,6 +130,18 @@ struct bxt_ddi_phy_info { */ enum dpio_phy rcomp_phy; + /** + * @reset_delay: delay in us to wait before setting the common reset + * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy. + */ + int reset_delay; + + /** + * @pwron_mask: Mask with the appropriate bit set that would cause the + * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON. + */ + u32 pwron_mask; + /** * @channel: struct containing per channel information. 
*/ @@ -145,6 +157,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = { [DPIO_PHY0] = { .dual_channel = true, .rcomp_phy = DPIO_PHY1, + .pwron_mask = BIT(0), .channel = { [DPIO_CH0] = { .port = PORT_B }, @@ -154,6 +167,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = { [DPIO_PHY1] = { .dual_channel = false, .rcomp_phy = -1, + .pwron_mask = BIT(1), .channel = { [DPIO_CH0] = { .port = PORT_A }, @@ -161,20 +175,77 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = { }, }; +static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = { + [DPIO_PHY0] = { + .dual_channel = false, + .rcomp_phy = DPIO_PHY1, + .pwron_mask = BIT(0), + .reset_delay = 20, + + .channel = { + [DPIO_CH0] = { .port = PORT_B }, + } + }, + [DPIO_PHY1] = { + .dual_channel = false, + .rcomp_phy = -1, + .pwron_mask = BIT(3), + .reset_delay = 20, + + .channel = { + [DPIO_CH0] = { .port = PORT_A }, + } + }, + [DPIO_PHY2] = { + .dual_channel = false, + .rcomp_phy = DPIO_PHY1, + .pwron_mask = BIT(1), + .reset_delay = 20, + + .channel = { + [DPIO_CH0] = { .port = PORT_C }, + } + }, +}; + static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info) { return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) | BIT(phy_info->channel[DPIO_CH0].port); } -void bxt_port_to_phy_channel(enum port port, +static const struct bxt_ddi_phy_info * +bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) +{ + if (IS_GEMINILAKE(dev_priv)) { + *count = ARRAY_SIZE(glk_ddi_phy_info); + return glk_ddi_phy_info; + } else { + *count = ARRAY_SIZE(bxt_ddi_phy_info); + return bxt_ddi_phy_info; + } +} + +static const struct bxt_ddi_phy_info * +bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy) +{ + int count; + const struct bxt_ddi_phy_info *phy_list = + bxt_get_phy_list(dev_priv, &count); + + return &phy_list[phy]; +} + +void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, enum dpio_phy *phy, enum dpio_channel *ch) { - const struct bxt_ddi_phy_info *phy_info; - int i; + const struct bxt_ddi_phy_info *phy_info, *phys; + int i, count; + + phys = bxt_get_phy_list(dev_priv, &count); - for (i = 0; i < ARRAY_SIZE(bxt_ddi_phy_info); i++) { - phy_info = &bxt_ddi_phy_info[i]; + for (i = 0; i < count; i++) { + phy_info = &phys[i]; if (port == phy_info->channel[DPIO_CH0].port) { *phy = i; @@ -203,7 +274,7 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, enum dpio_phy phy; enum dpio_channel ch; - bxt_port_to_phy_channel(port, &phy, &ch); + bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); /* * While we write to the group register to program all lanes at once we @@ -241,10 +312,12 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, enum dpio_phy phy) { - const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy]; + const struct bxt_ddi_phy_info *phy_info; enum port port; - if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy))) + phy_info = bxt_get_phy_info(dev_priv, phy); + + if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask)) return false; if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) & @@ -306,9 +379,11 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) { - const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy]; + const struct bxt_ddi_phy_info *phy_info; u32 val; + phy_info = bxt_get_phy_info(dev_priv, phy); + if 
(bxt_ddi_phy_is_enabled(dev_priv, phy)) { /* Still read out the GRC value for state verification */ if (phy_info->rcomp_phy != -1) @@ -325,7 +400,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, } val = I915_READ(BXT_P_CR_GT_DISP_PWRON); - val |= GT_DISPLAY_POWER_ON(phy); + val |= phy_info->pwron_mask; I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); /* @@ -383,6 +458,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, I915_WRITE(BXT_PORT_REF_DW8(phy), val); } + if (phy_info->reset_delay) + udelay(phy_info->reset_delay); + val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); val |= COMMON_RESET_DIS; I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); @@ -394,20 +472,24 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) { + const struct bxt_ddi_phy_info *phy_info; uint32_t val; + phy_info = bxt_get_phy_info(dev_priv, phy); + val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); val &= ~COMMON_RESET_DIS; I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); val = I915_READ(BXT_P_CR_GT_DISP_PWRON); - val &= ~GT_DISPLAY_POWER_ON(phy); + val &= ~phy_info->pwron_mask; I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); } void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) { - const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy]; + const struct bxt_ddi_phy_info *phy_info = + bxt_get_phy_info(dev_priv, phy); enum dpio_phy rcomp_phy = phy_info->rcomp_phy; bool was_enabled; @@ -460,10 +542,12 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy) { - const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy]; + const struct bxt_ddi_phy_info *phy_info; uint32_t mask; bool ok; + phy_info = bxt_get_phy_info(dev_priv, phy); + #define _CHK(reg, mask, exp, fmt, ...) 
\ __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ ## __VA_ARGS__) @@ -539,7 +623,7 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, enum dpio_channel ch; int lane; - bxt_port_to_phy_channel(port, &phy, &ch); + bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); for (lane = 0; lane < 4; lane++) { u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane)); @@ -567,7 +651,7 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) int lane; uint8_t mask; - bxt_port_to_phy_channel(port, &phy, &ch); + bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); mask = 0; for (lane = 0; lane < 4; lane++) { diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 976d39086d07..8a82507dfe3f 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -1373,7 +1373,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, enum dpio_phy phy; enum dpio_channel ch; - bxt_port_to_phy_channel(port, &phy, &ch); + bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); /* Non-SSC reference */ temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); @@ -1491,7 +1491,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, enum dpio_phy phy; enum dpio_channel ch; - bxt_port_to_phy_channel(port, &phy, &ch); + bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) return false; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 49043fcf694f..4987a66044e6 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -477,6 +477,18 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, #define GLK_DISPLAY_DDI_C_POWER_DOMAINS ( \ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ BIT(POWER_DOMAIN_INIT)) +#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ + BIT(POWER_DOMAIN_AUX_A) | \ + BIT(POWER_DOMAIN_INIT)) +#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT(POWER_DOMAIN_AUX_B) | \ + BIT(POWER_DOMAIN_INIT)) +#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT(POWER_DOMAIN_AUX_C) | \ + BIT(POWER_DOMAIN_INIT)) #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ BIT(POWER_DOMAIN_AUX_A) | \ BIT(POWER_DOMAIN_INIT)) @@ -926,6 +938,12 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC); if (power_well->count > 0) bxt_ddi_phy_verify_state(dev_priv, power_well->data); + + if (IS_GEMINILAKE(dev_priv)) { + power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C); + if (power_well->count > 0) + bxt_ddi_phy_verify_state(dev_priv, power_well->data); + } } static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, @@ -2218,6 +2236,27 @@ static struct i915_power_well glk_power_wells[] = { .ops = &skl_power_well_ops, .id = SKL_DISP_PW_2, }, + { + .name = "dpio-common-a", + .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .id = BXT_DPIO_CMN_A, + .data = DPIO_PHY1, + }, + { + .name = "dpio-common-b", + .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .id = BXT_DPIO_CMN_BC, + .data = DPIO_PHY0, + }, + { + .name = "dpio-common-c", + .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .id = GLK_DPIO_CMN_C, + .data = DPIO_PHY2, + }, { .name = "AUX A", 
.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, -- cgit v1.2.3 From 51b3ee35affa3695bd89f6c6cdb22cd65aff5155 Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Fri, 2 Dec 2016 10:23:52 +0200 Subject: drm/i915/glk: Set DCC delay range 2 in PLL enable sequence Follow the PLL enable sequence updated in bspec, which requires the DCC delay range 2 bit to be set. v2: Moved from DDI init sequence to PLL enable. v3: Don't read value from GRP register. (Rodrido) Cc: Rodrigo Vivi Signed-off-by: Ander Conselvan De Oliveira Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1480667037-11215-5-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 15 +++++++++++++++ drivers/gpu/drm/i915/intel_dpll_mgr.c | 6 ++++++ 2 files changed, 21 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d1f0720c6ef3..6cff01debe72 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1784,6 +1784,21 @@ enum skl_disp_power_wells { #define DEEMPH_SHIFT 24 #define DE_EMPHASIS (0xFF << DEEMPH_SHIFT) +#define _PORT_TX_DW5_LN0_A 0x162514 +#define _PORT_TX_DW5_LN0_B 0x6C514 +#define _PORT_TX_DW5_LN0_C 0x6C914 +#define _PORT_TX_DW5_GRP_A 0x162D14 +#define _PORT_TX_DW5_GRP_B 0x6CD14 +#define _PORT_TX_DW5_GRP_C 0x6CF14 +#define BXT_PORT_TX_DW5_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ + _PORT_TX_DW5_LN0_B, \ + _PORT_TX_DW5_LN0_C) +#define BXT_PORT_TX_DW5_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \ + _PORT_TX_DW5_GRP_B, \ + _PORT_TX_DW5_GRP_C) +#define DCC_DELAY_RANGE_1 (1 << 9) +#define DCC_DELAY_RANGE_2 (1 << 8) + #define _PORT_TX_DW14_LN0_A 0x162538 #define _PORT_TX_DW14_LN0_B 0x6C538 #define _PORT_TX_DW14_LN0_C 0x6C938 diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 8a82507dfe3f..63104b7a1bcf 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -1458,6 +1458,12 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, 200)) DRM_ERROR("PLL %d not locked\n", port); + if (IS_GEMINILAKE(dev_priv)) { + temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch)); + temp |= DCC_DELAY_RANGE_2; + I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp); + } + /* * While we write to the group register to program all lanes at once we * can read only lane registers and we pick lanes 0/1 for that. -- cgit v1.2.3 From f7044dd904d7a97967af097515da272fa89b00e9 Mon Sep 17 00:00:00 2001 From: Madhav Chauhan Date: Fri, 2 Dec 2016 10:23:53 +0200 Subject: drm/i915/glk: Update Port PLL enable sequence for Geminilkae Add steps for enabling and disabling Port PLL as per bspec. 
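As a rough, self-contained sketch of the extra step this patch adds (stand-in helper and register names, not the driver code): request PLL power by setting a control bit, poll a status bit before proceeding, and do the inverse on disable.

/* Illustrative only: fake_pll_enable_reg stands in for BXT_PORT_PLL_ENABLE(port),
 * mmio_read()/mmio_write() stand in for I915_READ()/I915_WRITE(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PORT_PLL_POWER_ENABLE  (1u << 26)   /* bit names as defined in the patch */
#define PORT_PLL_POWER_STATE   (1u << 25)

static uint32_t fake_pll_enable_reg;

static uint32_t mmio_read(void) { return fake_pll_enable_reg; }

static void mmio_write(uint32_t val)
{
	fake_pll_enable_reg = val;
	/* Pretend the punit reacts instantly: mirror the request into the
	 * read-only status bit so the poll below succeeds. */
	if (val & PORT_PLL_POWER_ENABLE)
		fake_pll_enable_reg |= PORT_PLL_POWER_STATE;
	else
		fake_pll_enable_reg &= ~PORT_PLL_POWER_STATE;
}

static bool glk_pll_power(bool enable)
{
	uint32_t tmp = mmio_read();

	if (enable)
		tmp |= PORT_PLL_POWER_ENABLE;
	else
		tmp &= ~PORT_PLL_POWER_ENABLE;
	mmio_write(tmp);

	/* The real driver polls PORT_PLL_POWER_STATE with a 200 us timeout. */
	return !!(mmio_read() & PORT_PLL_POWER_STATE) == enable;
}

int main(void)
{
	printf("power up:   %s\n", glk_pll_power(true)  ? "ok" : "timeout");
	printf("power down: %s\n", glk_pll_power(false) ? "ok" : "timeout");
	return 0;
}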
Signed-off-by: Madhav Chauhan Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1480667037-11215-6-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 2 ++ drivers/gpu/drm/i915/intel_dpll_mgr.c | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6cff01debe72..90685d235410 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1574,6 +1574,8 @@ enum skl_disp_power_wells { #define PORT_PLL_ENABLE (1 << 31) #define PORT_PLL_LOCK (1 << 30) #define PORT_PLL_REF_SEL (1 << 27) +#define PORT_PLL_POWER_ENABLE (1 << 26) +#define PORT_PLL_POWER_STATE (1 << 25) #define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B) #define _PORT_PLL_EBB_0_A 0x162034 diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 63104b7a1bcf..97f7cc9b58ee 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -1380,6 +1380,16 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, temp |= PORT_PLL_REF_SEL; I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); + if (IS_GEMINILAKE(dev_priv)) { + temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); + temp |= PORT_PLL_POWER_ENABLE; + I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); + + if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & + PORT_PLL_POWER_STATE), 200)) + DRM_ERROR("Power state not set for PLL:%d\n", port); + } + /* Disable 10 bit clock */ temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch)); temp &= ~PORT_PLL_10BIT_CLK_ENABLE; @@ -1485,6 +1495,16 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, temp &= ~PORT_PLL_ENABLE; I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); POSTING_READ(BXT_PORT_PLL_ENABLE(port)); + + if (IS_GEMINILAKE(dev_priv)) { + temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); + temp &= ~PORT_PLL_POWER_ENABLE; + I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); + + if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) & + PORT_PLL_POWER_STATE), 200)) + DRM_ERROR("Power state not reset for PLL:%d\n", port); + } } static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, -- cgit v1.2.3 From 89b3c3c7ee9d9be319b90af3d52f00feba04f1d8 Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Fri, 2 Dec 2016 10:23:54 +0200 Subject: drm/i915/glk: Reuse broxton's cdclk code for GLK Geminilake has the same register layout, reference clock and programming sequence as broxton. The difference is that it doesn't support the 1.5 divider and has different ratios, but a lot of code can be shared between the two platforms. v2: Rebase (s/broxton/bxt). v3: Fix vco calculation in glk_de_pll_vco(). 
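A standalone arithmetic check of why a single ratio suffices on GLK (illustrative; it assumes the 19.2 MHz DE PLL reference that the message above says is shared with Broxton):

#include <stdio.h>

int main(void)
{
	const int ref_khz = 19200;	/* assumed 19.2 MHz reference, as on BXT */
	const int ratio   = 33;		/* the only ratio glk_de_pll_vco() uses */
	const int vco_khz = ref_khz * ratio;		/* 633600 kHz */
	const int cdclk_khz[] = { 79200, 158400, 316800 };

	for (int i = 0; i < 3; i++)
		printf("cdclk %6d kHz -> vco/cdclk = %d\n",
		       cdclk_khz[i], vco_khz / cdclk_khz[i]);	/* 8, 4, 2 */
	/* Since the CD2X clock is twice cdclk, those correspond to CD2X
	 * dividers of 4, 2 and 1; the 1.5 divider case never occurs, matching
	 * the WARN(IS_GEMINILAKE(...), "Unsupported divider") added below. */
	return 0;
}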
Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1480667037-11215-7-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/intel_display.c | 73 ++++++++++++++++++++++++++++++++---- 1 file changed, 65 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 43f727bbe4fd..2490fadfb7b4 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -124,6 +124,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc); static void intel_modeset_setup_hw_state(struct drm_device *dev); static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); static int ilk_max_pixel_rate(struct drm_atomic_state *state); +static int glk_calc_cdclk(int max_pixclk); static int bxt_calc_cdclk(int max_pixclk); struct intel_limit { @@ -5841,6 +5842,8 @@ static void intel_update_max_cdclk(struct drm_i915_private *dev_priv) max_cdclk = 308571; dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); + } else if (IS_GEMINILAKE(dev_priv)) { + dev_priv->max_cdclk_freq = 316800; } else if (IS_BROXTON(dev_priv)) { dev_priv->max_cdclk_freq = 624000; } else if (IS_BROADWELL(dev_priv)) { @@ -5928,6 +5931,26 @@ static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) return dev_priv->cdclk_pll.ref * ratio; } +static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) +{ + int ratio; + + if (cdclk == dev_priv->cdclk_pll.ref) + return 0; + + switch (cdclk) { + default: + MISSING_CASE(cdclk); + case 79200: + case 158400: + case 316800: + ratio = 33; + break; + } + + return dev_priv->cdclk_pll.ref * ratio; +} + static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) { I915_WRITE(BXT_DE_PLL_ENABLE, 0); @@ -5969,7 +5992,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) u32 val, divider; int vco, ret; - vco = bxt_de_pll_vco(dev_priv, cdclk); + if (IS_GEMINILAKE(dev_priv)) + vco = glk_de_pll_vco(dev_priv, cdclk); + else + vco = bxt_de_pll_vco(dev_priv, cdclk); DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); @@ -5982,6 +6008,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) divider = BXT_CDCLK_CD2X_DIV_SEL_2; break; case 3: + WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n"); divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; break; case 2: @@ -6091,6 +6118,8 @@ sanitize: void bxt_init_cdclk(struct drm_i915_private *dev_priv) { + int cdclk; + bxt_sanitize_cdclk(dev_priv); if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) @@ -6101,7 +6130,12 @@ void bxt_init_cdclk(struct drm_i915_private *dev_priv) * - The initial CDCLK needs to be read from VBT. * Need to make this change after VBT has changes for BXT. 
*/ - bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0)); + if (IS_GEMINILAKE(dev_priv)) + cdclk = glk_calc_cdclk(0); + else + cdclk = bxt_calc_cdclk(0); + + bxt_set_cdclk(dev_priv, cdclk); } void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) @@ -6527,6 +6561,16 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, return 200000; } +static int glk_calc_cdclk(int max_pixclk) +{ + if (max_pixclk > 158400) + return 316800; + else if (max_pixclk > 79200) + return 158400; + else + return 79200; +} + static int bxt_calc_cdclk(int max_pixclk) { if (max_pixclk > 576000) @@ -6589,15 +6633,27 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state) { + struct drm_i915_private *dev_priv = to_i915(state->dev); int max_pixclk = ilk_max_pixel_rate(state); struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + int cdclk; - intel_state->cdclk = intel_state->dev_cdclk = - bxt_calc_cdclk(max_pixclk); + if (IS_GEMINILAKE(dev_priv)) + cdclk = glk_calc_cdclk(max_pixclk); + else + cdclk = bxt_calc_cdclk(max_pixclk); - if (!intel_state->active_crtcs) - intel_state->dev_cdclk = bxt_calc_cdclk(0); + intel_state->cdclk = intel_state->dev_cdclk = cdclk; + + if (!intel_state->active_crtcs) { + if (IS_GEMINILAKE(dev_priv)) + cdclk = glk_calc_cdclk(0); + else + cdclk = bxt_calc_cdclk(0); + + intel_state->dev_cdclk = cdclk; + } return 0; } @@ -7299,6 +7355,7 @@ static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv) div = 2; break; case BXT_CDCLK_CD2X_DIV_SEL_1_5: + WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n"); div = 3; break; case BXT_CDCLK_CD2X_DIV_SEL_2: @@ -16031,7 +16088,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) dev_priv->display.get_display_clock_speed = skylake_get_display_clock_speed; - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) dev_priv->display.get_display_clock_speed = broxton_get_display_clock_speed; else if (IS_BROADWELL(dev_priv)) @@ -16104,7 +16161,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) valleyview_modeset_commit_cdclk; dev_priv->display.modeset_calc_cdclk = valleyview_modeset_calc_cdclk; - } else if (IS_BROXTON(dev_priv)) { + } else if (IS_GEN9_LP(dev_priv)) { dev_priv->display.modeset_commit_cdclk = bxt_modeset_commit_cdclk; dev_priv->display.modeset_calc_cdclk = -- cgit v1.2.3 From 09d093869feb09f098ac7bd28a8dbadccff79216 Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Fri, 2 Dec 2016 10:23:55 +0200 Subject: drm/i915/glk: Allow dotclock up to 2 * cdclk on geminilake Geminilake has double wide pipes so it can output two pixels per CD clock. 
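A quick numeric check of what the doubled ceiling buys (illustrative values; the 316800 kHz figure is the GLK max cdclk set earlier in this series):

#include <stdio.h>

int main(void)
{
	const int max_cdclk_khz  = 316800;
	const int max_dotclk_khz = 2 * max_cdclk_khz;	/* 633600 kHz with 2 px/clk */
	const int fourk60_khz    = 594000;		/* 3840x2160@60 pixel clock */

	printf("4k@60 needs %d kHz, ceiling is %d kHz -> %s\n",
	       fourk60_khz, max_dotclk_khz,
	       fourk60_khz <= max_dotclk_khz ? "fits" : "pruned");
	return 0;
}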
Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1480667037-11215-8-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/intel_display.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2490fadfb7b4..aabef5e13bd4 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5805,8 +5805,10 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) { int max_cdclk_freq = dev_priv->max_cdclk_freq; - if (INTEL_INFO(dev_priv)->gen >= 9 || - IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + if (IS_GEMINILAKE(dev_priv)) + return 2 * max_cdclk_freq; + else if (INTEL_INFO(dev_priv)->gen >= 9 || + IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) return max_cdclk_freq; else if (IS_CHERRYVIEW(dev_priv)) return max_cdclk_freq*95/100; @@ -6563,9 +6565,9 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, static int glk_calc_cdclk(int max_pixclk) { - if (max_pixclk > 158400) + if (max_pixclk > 2 * 158400) return 316800; - else if (max_pixclk > 79200) + else if (max_pixclk > 2 * 79200) return 158400; else return 79200; -- cgit v1.2.3 From b817c440c1e854f547ba481bec5608f9d9709bcf Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Fri, 2 Dec 2016 10:23:56 +0200 Subject: drm/i915/glk: Implement core display init/uninit sequence for geminilake The sequence is pretty much the same as broxton, except that bspec requires the AUX domains to be enabled. But since those can't be enabled before the phys are initialized, we just use the same sequence as broxton. v2: Don't manually enable AUX domains. (Ander) Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1480667037-11215-9-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 4987a66044e6..fb10ee630d2e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -2721,7 +2721,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { skl_display_core_init(dev_priv, resume); - } else if (IS_BROXTON(dev_priv)) { + } else if (IS_GEN9_LP(dev_priv)) { bxt_display_core_init(dev_priv, resume); } else if (IS_CHERRYVIEW(dev_priv)) { mutex_lock(&power_domains->lock); @@ -2760,7 +2760,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv) if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) skl_display_core_uninit(dev_priv); - else if (IS_BROXTON(dev_priv)) + else if (IS_GEN9_LP(dev_priv)) bxt_display_core_uninit(dev_priv); } -- cgit v1.2.3 From e9c9882556fc92f053f81195f75845ec362a9050 Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Fri, 2 Dec 2016 10:23:57 +0200 Subject: drm/i915/glk: Configure number of sprite planes properly Geminilake has 4 planes (3 sprites) per pipe. 
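For reference, the per-pipe sprite counts before and after this change, as a standalone table derived from the diff below:

#include <stdio.h>

int main(void)
{
	const int glk_num_sprites[3] = { 3, 3, 3 };	/* pipes A, B, C */
	const int bxt_num_sprites[3] = { 2, 2, 1 };

	for (int pipe = 0; pipe < 3; pipe++)
		printf("pipe %c: GLK %d sprites, BXT %d sprites\n",
		       'A' + pipe, glk_num_sprites[pipe], bxt_num_sprites[pipe]);
	return 0;
}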
Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1480667037-11215-10-git-send-email-ander.conselvan.de.oliveira@intel.com --- drivers/gpu/drm/i915/intel_device_info.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 185e3bbc9ec9..602d7610ec79 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -278,7 +278,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) * we don't expose the topmost plane at all to prevent ABI breakage * down the line. */ - if (IS_BROXTON(dev_priv)) { + if (IS_GEMINILAKE(dev_priv)) + for_each_pipe(dev_priv, pipe) + info->num_sprites[pipe] = 3; + else if (IS_BROXTON(dev_priv)) { info->num_sprites[PIPE_A] = 2; info->num_sprites[PIPE_B] = 2; info->num_sprites[PIPE_C] = 1; -- cgit v1.2.3 From 01a551971c0fb1739dbe61f60105323a1f8827ad Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 17 Nov 2016 11:17:36 -0800 Subject: drm/i915: Only poll DW3_A when init DDI PHY for ports B and C. According to Bspec we need to "Poll for PORT_REF_DW3_A grc_done == 1b" only on ports B and C initialization sequence when copying rcomp from port A. So let's follow the spec and only poll for that case and not on every port A initialization. v2: Also remove the grc_done check from bxt_ddi_phy_is_enabled() otherwise it might believe it is disabled and force it to re program. Cc: Imre Deak Cc: Ander Conselvan de Oliveira Signed-off-by: Rodrigo Vivi Reviewed-by: Imre Deak Link: http://patchwork.freedesktop.org/patch/msgid/1479410256-25735-1-git-send-email-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/intel_dpio_phy.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c index 46e38a05b3ac..09b670929786 100644 --- a/drivers/gpu/drm/i915/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c @@ -328,14 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, return false; } - if (phy_info->rcomp_phy == -1 && - !(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE)) { - DRM_DEBUG_DRIVER("DDI PHY %d powered, but GRC isn't done\n", - phy); - - return false; - } - if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) { DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n", phy); @@ -441,6 +433,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, if (phy_info->rcomp_phy != -1) { uint32_t grc_code; + + bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy); + /* * PHY0 isn't connected to an RCOMP resistor so copy over * the corresponding calibrated value from PHY1, and disable @@ -464,10 +459,6 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); val |= COMMON_RESET_DIS; I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); - - if (phy_info->rcomp_phy == -1) - bxt_phy_wait_grc_done(dev_priv, phy); - } void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) -- cgit v1.2.3 From ce6612d6844701a5b5a162f201103240c07abd9e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 5 Dec 2016 09:25:26 +0100 Subject: drm/i915: Update DRIVER_DATE to 20161205 Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 035ac75dd1ed..ca9786c05bb2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -76,8 +76,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20161121" -#define DRIVER_TIMESTAMP 1479717903 +#define DRIVER_DATE "20161205" +#define DRIVER_TIMESTAMP 1480926326 #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ -- cgit v1.2.3 From 3c30c7f7b09a0eec58cb43f96705877f66975057 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 2 Dec 2016 18:35:41 +0200 Subject: drm/i915: Add I2C and DP-AUX char devices to debug kconfig These char devices exposing the driver's I2C and DP-AUX adapters for user space tools are useful to debug display output related issues. Enable them with the rest of additional driver debug options. Suggested-by: Chris Wilson Signed-off-by: Imre Deak Reviewed-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/1480696541-13697-1-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/Kconfig.debug | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index a6c69b8cb1d2..cc4ad576e6e9 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -19,6 +19,8 @@ config DRM_I915_DEBUG bool "Enable additional driver debugging" depends on DRM_I915 select PREEMPT_COUNT + select I2C_CHARDEV + select DRM_DP_AUX_CHARDEV select X86_MSR # used by igt/pm_rpm select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) select DRM_DEBUG_MM if DRM=y -- cgit v1.2.3 From fd81c44eba9ca1e78d0601f37b5d7819df522aa7 Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Mon, 14 Nov 2016 13:50:20 -0800 Subject: drm/i915: Fix DP link rate math MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We store DP link rates as link clock frequencies in kHz, just like all other clock values. But, DP link rates in the DP Spec. are expressed in Gbps/lane, which seems to have led to some confusion. E.g., for HBR2 Max. data rate = 5.4 Gbps/lane x 4 lane x 8/10 x 1/8 = 2160000 kBps where, 8/10 is for channel encoding and 1/8 is for bit to Byte conversion Using link clock frequency, like we do Max. data rate = 540000 kHz * 4 lanes = 2160000 kSymbols/s Because, each symbol has 8 bit of data, this is 2160000 kBps and there is no need to account for channel encoding here. But, currently we do 540000 kHz * 4 lanes * (8/10) = 1728000 kBps Similarly, while computing the required link bandwidth for a mode, there is a mysterious 1/10 term. This should simply be pixel_clock kHz * (bpp/8) to give the final result in kBps v2: Changed to DIV_ROUND_UP() and comment changes (Ville) Signed-off-by: Dhinakaran Pandiyan Link: http://patchwork.freedesktop.org/patch/msgid/1479160220-17794-1-git-send-email-dhinakaran.pandiyan@intel.com Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/intel_dp.c | 35 +++++++++++++++-------------------- 1 file changed, 15 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1f2420cbe06a..53fc1b3f6770 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -161,33 +161,23 @@ static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp) return min(source_max, sink_max); } -/* - * The units on the numbers in the next two are... bizarre. 
Examples will - * make it clearer; this one parallels an example in the eDP spec. - * - * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: - * - * 270000 * 1 * 8 / 10 == 216000 - * - * The actual data capacity of that configuration is 2.16Gbit/s, so the - * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - - * or equivalently, kilopixels per second - so for 1680x1050R it'd be - * 119000. At 18bpp that's 2142000 kilobits per second. - * - * Thus the strange-looking division by 10 in intel_dp_link_required, to - * get the result in decakilobits instead of kilobits. - */ - static int intel_dp_link_required(int pixel_clock, int bpp) { - return (pixel_clock * bpp + 9) / 10; + /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ + return DIV_ROUND_UP(pixel_clock * bpp, 8); } static int intel_dp_max_data_rate(int max_link_clock, int max_lanes) { - return (max_link_clock * max_lanes * 8) / 10; + /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the + * link rate that is generally expressed in Gbps. Since, 8 bits of data + * is transmitted every LS_Clk per lane, there is no need to account for + * the channel encoding that is done in the PHY layer here. + */ + + return max_link_clock * max_lanes; } static int @@ -3573,7 +3563,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) if (val == 0) break; - /* Value read is in kHz while drm clock is saved in deca-kHz */ + /* Value read multiplied by 200kHz gives the per-lane + * link rate in kHz. The source rates are, however, + * stored in terms of LS_Clk kHz. The full conversion + * back to symbols is + * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) + */ intel_dp->sink_rates[i] = (val * 200) / 10; } intel_dp->num_sink_rates = i; -- cgit v1.2.3 From 22a2c8e0457f5d66db0819a49beb8c119d8f7a97 Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Tue, 15 Nov 2016 12:59:06 -0800 Subject: drm/i915: Validate mode against max. link data rate for DP MST MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not validating the mode rate against max. link rate results in not pruning invalid modes. For e.g, a HBR2 5.4 Gbps 2-lane configuration does not support 4k@60Hz. But, we do not reject this mode. So, make use of the helpers in intel_dp to validate mode data rate against max. link data rate of a configuration. v3: Renamed local variables again for consistency (Manasi) v2: Renamed mode data rate local variable to be more explanatory. 
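A worked example of the new check (standalone sketch mirroring the intel_dp helpers above; units are kHz for the link symbol clock and kBps for data rates): an HBR2 2-lane link against a 24 bpp 4k@60 mode.

#include <stdio.h>

static int link_required_kBps(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 7) / 8;	/* DIV_ROUND_UP(clock * bpp, 8) */
}

static int max_data_rate_kBps(int max_link_clock_khz, int max_lanes)
{
	return max_link_clock_khz * max_lanes;	/* channel coding already in LS_Clk */
}

int main(void)
{
	int max_rate  = max_data_rate_kBps(540000, 2);	/* HBR2, 2 lanes: 1080000 */
	int mode_rate = link_required_kBps(594000, 24);	/* 4k@60, 24 bpp: 1782000 */

	printf("mode_rate %d kBps vs max_rate %d kBps -> %s\n",
	       mode_rate, max_rate,
	       mode_rate > max_rate ? "MODE_CLOCK_HIGH" : "MODE_OK");
	return 0;
}

With 1782000 kBps needed against 1080000 kBps available, the mode is now rejected, which is exactly the HBR2 2-lane 4k@60 case described above.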
Signed-off-by: Dhinakaran Pandiyan Link: http://patchwork.freedesktop.org/patch/msgid/1479243546-17189-1-git-send-email-dhinakaran.pandiyan@intel.com Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/intel_dp.c | 4 ++-- drivers/gpu/drm/i915/intel_dp_mst.c | 12 +++++++++++- drivers/gpu/drm/i915/intel_drv.h | 2 ++ 3 files changed, 15 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 53fc1b3f6770..db75bb924e48 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -161,14 +161,14 @@ static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp) return min(source_max, sink_max); } -static int +int intel_dp_link_required(int pixel_clock, int bpp) { /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ return DIV_ROUND_UP(pixel_clock * bpp, 8); } -static int +int intel_dp_max_data_rate(int max_link_clock, int max_lanes) { /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 1f98757f61e7..205fe4748ec5 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -350,7 +350,17 @@ static enum drm_mode_status intel_dp_mst_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + int bpp = 24; /* MST uses fixed bpp */ + int max_rate, mode_rate, max_lanes, max_link_clock; + + max_link_clock = intel_dp_max_link_rate(intel_dp); + max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); + + max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); + mode_rate = intel_dp_link_required(mode->clock, bpp); /* TODO - validate mode against available PBN for link */ if (mode->clock < 10000) @@ -359,7 +369,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, if (mode->flags & DRM_MODE_FLAG_DBLCLK) return MODE_H_ILLEGAL; - if (mode->clock > max_dotclk) + if (mode_rate > max_rate || mode->clock > max_dotclk) return MODE_CLOCK_HIGH; return MODE_OK; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1d126c29598f..fd77a3ba857b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1457,6 +1457,8 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp); bool __intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc); bool intel_dp_read_desc(struct intel_dp *intel_dp); +int intel_dp_link_required(int pixel_clock, int bpp); +int intel_dp_max_data_rate(int max_link_clock, int max_lanes); /* intel_dp_aux_backlight.c */ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); -- cgit v1.2.3 From 3125d39fe62eff2895f75671b07ae972bb2d8124 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:03 +0200 Subject: drm/i915: Drop the nop intel_update_watermarks() call from haswell_crtc_enable() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit HSW+ all use the .initial_watermarks() hook, so there's no point in calling intel_update_watermarks() from HSW+ specific code. We'll still hang on to the .initial_watermarks NULL check since theoretically if the memory latencies are not populated we would not populate the function pointer either. 
Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-2-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1fafcce53ecc..b01faf50fe42 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5465,10 +5465,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, intel_ddi_enable_transcoder_func(crtc); if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(old_intel_state, - pipe_config); - else - intel_update_watermarks(intel_crtc); + dev_priv->display.initial_watermarks(old_intel_state, pipe_config); /* XXX: Do the pipe assertions at the right place for BXT DSI. */ if (!transcoder_is_dsi(cpu_transcoder)) -- cgit v1.2.3 From ddd2b792cc942adfc31288dabf5d22886c31193f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:04 +0200 Subject: drm/i915: Use the ilk_disable_lp_wm() return value MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ilk_disable_lp_wm() will tell us whether the LP1+ watermarks were disabled or not, and hence whether we need to for the vblank wait or not. Let's use that information to eliminate some useless vblank waits. Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-3-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b01faf50fe42..1f7d91af60cf 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5114,10 +5114,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) * * WaCxSRDisabledForSpriteScaling:ivb */ - if (pipe_config->disable_lp_wm) { - ilk_disable_lp_wm(dev); + if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev)) intel_wait_for_vblank(dev_priv, crtc->pipe); - } /* * If we're doing a modeset, we're done. No need to do any pre-vblank -- cgit v1.2.3 From 1be4d3793d5a93daddcd9be657c429b38ad750a3 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:05 +0200 Subject: drm/i915: Fix the level 0 max_wm hack on VLV/CHV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The watermark should never exceed the FIFO size, so we need to check against the current FIFO size instead of the theoretical maximum when we clamp the level 0 watermark. 
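In miniature, the clamp now uses the plane's actual FIFO allocation rather than the 511/63 theoretical maxima (illustrative sketch with invented numbers):

#include <stdio.h>

static int clamp_level0_wm(int wm, int fifo_size)
{
	return wm > fifo_size ? fifo_size : wm;	/* the driver also WARNs when clamping */
}

int main(void)
{
	/* e.g. a primary plane that was only allocated 96 FIFO entries */
	printf("%d\n", clamp_level0_wm(200, 96));	/* 96, not 200 (and not 511) */
	return 0;
}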
Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-4-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_pm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 59a88de83b9a..8ff1e23d403b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1132,13 +1132,13 @@ static void vlv_compute_wm(struct intel_crtc *crtc) /* normal watermarks */ for (level = 0; level < wm_state->num_levels; level++) { int wm = vlv_compute_wm_level(plane, crtc, state, level); - int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511; + int max_wm = plane->wm.fifo_size; /* hack */ if (WARN_ON(level == 0 && wm > max_wm)) wm = max_wm; - if (wm > plane->wm.fifo_size) + if (wm > max_wm) break; switch (plane->base.type) { -- cgit v1.2.3 From 50a9dd3f24e1478ce143bf2876b0680f2038c503 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:06 +0200 Subject: drm/i915: Clean up VLV/CHV maxfifo watermark setup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's compute the maxfifo watermarks using max() instead of min(). Can't even recall why I did it the other way originally. Anyways using max() avoids having to initialize the watermarks to the max value first. Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-5-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_pm.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 8ff1e23d403b..b979e06536b2 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1100,7 +1100,6 @@ static void vlv_compute_wm(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(dev); struct vlv_wm_state *wm_state = &crtc->wm_state; struct intel_plane *plane; - int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1; int level; memset(wm_state, 0, sizeof(*wm_state)); @@ -1115,13 +1114,6 @@ static void vlv_compute_wm(struct intel_crtc *crtc) if (wm_state->num_active_planes != 1) wm_state->cxsr = false; - if (wm_state->cxsr) { - for (level = 0; level < wm_state->num_levels; level++) { - wm_state->sr[level].plane = sr_fifo_size; - wm_state->sr[level].cursor = 63; - } - } - for_each_intel_plane_on_crtc(dev, crtc, plane) { struct intel_plane_state *state = to_intel_plane_state(plane->base.state); @@ -1172,14 +1164,14 @@ static void vlv_compute_wm(struct intel_crtc *crtc) case DRM_PLANE_TYPE_PRIMARY: for (level = 0; level < wm_state->num_levels; level++) wm_state->sr[level].plane = - min(wm_state->sr[level].plane, + max(wm_state->sr[level].plane, wm_state->wm[level].primary); break; case DRM_PLANE_TYPE_OVERLAY: sprite = vlv_sprite_id(plane->id); for (level = 0; level < wm_state->num_levels; level++) wm_state->sr[level].plane = - min(wm_state->sr[level].plane, + max(wm_state->sr[level].plane, wm_state->wm[level].sprite[sprite]); break; } -- cgit v1.2.3 From 6b6b3eeffb0089c987a2ad66f2050d397dea6241 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:07 +0200 Subject: drm/i915: Remove duplicated wm setup for vlv and chv MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The 
code for vlv and chv wm latency/function pointer setup is identical. Drop one of the copies. Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-6-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_pm.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b979e06536b2..0c5e2163cc75 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7683,10 +7683,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Failed to read display plane latency. " "Disable CxSR\n"); } - } else if (IS_CHERRYVIEW(dev_priv)) { - vlv_setup_wm_latency(dev_priv); - dev_priv->display.update_wm = vlv_update_wm; - } else if (IS_VALLEYVIEW(dev_priv)) { + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { vlv_setup_wm_latency(dev_priv); dev_priv->display.update_wm = vlv_update_wm; } else if (IS_PINEVIEW(dev_priv)) { -- cgit v1.2.3 From 1b31389cd0e61480950ac366d76a335066af19e2 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:08 +0200 Subject: drm/i915: Organize vlv/chv watermarks by plane_id MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Store the vlv/chv watermark values in straight up arrays indexed by enum plane_id. Avoids a lot of useless checks for the plane type when we don't have to think which structure member we need to access. Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-7-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/i915_drv.h | 16 ++- drivers/gpu/drm/i915/intel_pm.c | 209 ++++++++++++++++------------------------ 2 files changed, 92 insertions(+), 133 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ca9786c05bb2..3868eb59b908 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1660,24 +1660,22 @@ struct ilk_wm_values { }; struct vlv_pipe_wm { - uint16_t primary; - uint16_t sprite[2]; - uint8_t cursor; + uint16_t plane[I915_MAX_PLANES]; }; struct vlv_sr_wm { uint16_t plane; - uint8_t cursor; + uint16_t cursor; +}; + +struct vlv_wm_ddl_values { + uint8_t plane[I915_MAX_PLANES]; }; struct vlv_wm_values { struct vlv_pipe_wm pipe[3]; struct vlv_sr_wm sr; - struct { - uint8_t cursor; - uint8_t sprite[2]; - uint8_t primary; - } ddl[3]; + struct vlv_wm_ddl_values ddl[3]; uint8_t level; bool cxsr; }; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0c5e2163cc75..0a3c26ab9205 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -849,56 +849,56 @@ static void vlv_write_wm_values(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; I915_WRITE(VLV_DDL(pipe), - (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) | - (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) | - (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) | - (wm->ddl[pipe].primary << DDL_PLANE_SHIFT)); + (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | + (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | + (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | + (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); I915_WRITE(DSPFW1, FW_WM(wm->sr.plane, SR) | - FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) | - 
FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) | - FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA)); + FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | + FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); I915_WRITE(DSPFW2, - FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) | - FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) | - FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA)); + FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | + FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); I915_WRITE(DSPFW3, FW_WM(wm->sr.cursor, CURSOR_SR)); if (IS_CHERRYVIEW(dev_priv)) { I915_WRITE(DSPFW7_CHV, - FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) | - FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC)); + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); I915_WRITE(DSPFW8_CHV, - FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) | - FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE)); + FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) | + FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE)); I915_WRITE(DSPFW9_CHV, - FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) | - FW_WM(wm->pipe[PIPE_C].cursor, CURSORC)); + FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC)); I915_WRITE(DSPHOWM, FW_WM(wm->sr.plane >> 9, SR_HI) | - FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) | - FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) | - FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) | - FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) | - FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) | - FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) | - FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) | - FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) | - FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); + FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); } else { I915_WRITE(DSPFW7, - FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) | - FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC)); + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); I915_WRITE(DSPHOWM, FW_WM(wm->sr.plane >> 9, SR_HI) | - FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) | - FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) | - FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) | - FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) | - FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) | - FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | + 
FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); } /* zero (unused) WM1 watermarks */ @@ -1053,12 +1053,6 @@ static void vlv_compute_fifo(struct intel_crtc *crtc) WARN_ON(fifo_left != 0); } -/* FIXME kill me */ -static inline int vlv_sprite_id(enum plane_id plane_id) -{ - return plane_id - PLANE_SPRITE0; -} - static void vlv_invert_wms(struct intel_crtc *crtc) { struct vlv_wm_state *wm_state = &crtc->wm_state; @@ -1074,22 +1068,8 @@ static void vlv_invert_wms(struct intel_crtc *crtc) wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor; for_each_intel_plane_on_crtc(dev, crtc, plane) { - switch (plane->base.type) { - int sprite; - case DRM_PLANE_TYPE_CURSOR: - wm_state->wm[level].cursor = plane->wm.fifo_size - - wm_state->wm[level].cursor; - break; - case DRM_PLANE_TYPE_PRIMARY: - wm_state->wm[level].primary = plane->wm.fifo_size - - wm_state->wm[level].primary; - break; - case DRM_PLANE_TYPE_OVERLAY: - sprite = vlv_sprite_id(plane->id); - wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size - - wm_state->wm[level].sprite[sprite]; - break; - } + wm_state->wm[level].plane[plane->id] = plane->wm.fifo_size - + wm_state->wm[level].plane[plane->id]; } } } @@ -1117,6 +1097,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc) for_each_intel_plane_on_crtc(dev, crtc, plane) { struct intel_plane_state *state = to_intel_plane_state(plane->base.state); + int level; if (!state->base.visible) continue; @@ -1133,19 +1114,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc) if (wm > max_wm) break; - switch (plane->base.type) { - int sprite; - case DRM_PLANE_TYPE_CURSOR: - wm_state->wm[level].cursor = wm; - break; - case DRM_PLANE_TYPE_PRIMARY: - wm_state->wm[level].primary = wm; - break; - case DRM_PLANE_TYPE_OVERLAY: - sprite = vlv_sprite_id(plane->id); - wm_state->wm[level].sprite[sprite] = wm; - break; - } + wm_state->wm[level].plane[plane->id] = wm; } wm_state->num_levels = level; @@ -1154,26 +1123,15 @@ static void vlv_compute_wm(struct intel_crtc *crtc) continue; /* maxfifo watermarks */ - switch (plane->base.type) { - int sprite, level; - case DRM_PLANE_TYPE_CURSOR: + if (plane->id == PLANE_CURSOR) { for (level = 0; level < wm_state->num_levels; level++) wm_state->sr[level].cursor = - wm_state->wm[level].cursor; - break; - case DRM_PLANE_TYPE_PRIMARY: - for (level = 0; level < wm_state->num_levels; level++) - wm_state->sr[level].plane = - max(wm_state->sr[level].plane, - wm_state->wm[level].primary); - break; - case DRM_PLANE_TYPE_OVERLAY: - sprite = vlv_sprite_id(plane->id); + wm_state->wm[level].plane[PLANE_CURSOR]; + } else { for (level = 0; level < wm_state->num_levels; level++) wm_state->sr[level].plane = max(wm_state->sr[level].plane, - wm_state->wm[level].sprite[sprite]); - break; + wm_state->wm[level].plane[plane->id]); } } @@ -1321,10 +1279,10 @@ static void vlv_merge_wm(struct drm_device *dev, if (wm->cxsr) wm->sr = wm_state->sr[wm->level]; - wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2; - wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2; - wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2; - wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2; + wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2; + wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2; + wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2; + wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2; } } @@ -1362,8 +1320,8 @@ static void vlv_update_wm(struct intel_crtc *crtc) DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, " "sprite0=%d, sprite1=%d, SR: 
plane=%d, cursor=%d level=%d cxsr=%d\n", - pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor, - wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1], + pipe_name(pipe), wm.pipe[pipe].plane[PLANE_PRIMARY], wm.pipe[pipe].plane[PLANE_CURSOR], + wm.pipe[pipe].plane[PLANE_SPRITE0], wm.pipe[pipe].plane[PLANE_SPRITE1], wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr); if (wm.cxsr && !dev_priv->wm.vlv.cxsr) @@ -4437,67 +4395,67 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv, for_each_pipe(dev_priv, pipe) { tmp = I915_READ(VLV_DDL(pipe)); - wm->ddl[pipe].primary = + wm->ddl[pipe].plane[PLANE_PRIMARY] = (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); - wm->ddl[pipe].cursor = + wm->ddl[pipe].plane[PLANE_CURSOR] = (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); - wm->ddl[pipe].sprite[0] = + wm->ddl[pipe].plane[PLANE_SPRITE0] = (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); - wm->ddl[pipe].sprite[1] = + wm->ddl[pipe].plane[PLANE_SPRITE1] = (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); } tmp = I915_READ(DSPFW1); wm->sr.plane = _FW_WM(tmp, SR); - wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB); - wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB); - wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA); + wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); tmp = I915_READ(DSPFW2); - wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB); - wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA); - wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA); + wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); + wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); tmp = I915_READ(DSPFW3); wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); if (IS_CHERRYVIEW(dev_priv)) { tmp = I915_READ(DSPFW7_CHV); - wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); - wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); tmp = I915_READ(DSPFW8_CHV); - wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF); - wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE); + wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); + wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE); tmp = I915_READ(DSPFW9_CHV); - wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC); - wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC); + wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); + wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); tmp = I915_READ(DSPHOWM); wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; - wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8; - wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8; - wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8; - wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; - wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; - wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; - wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; - wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; - wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; + wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; + 
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8; + wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; } else { tmp = I915_READ(DSPFW7); - wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); - wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); tmp = I915_READ(DSPHOWM); wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; - wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; - wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; - wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; - wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; - wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; - wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; } } @@ -4556,8 +4514,11 @@ void vlv_wm_get_hw_state(struct drm_device *dev) for_each_pipe(dev_priv, pipe) DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", - pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor, - wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]); + pipe_name(pipe), + wm->pipe[pipe].plane[PLANE_PRIMARY], + wm->pipe[pipe].plane[PLANE_CURSOR], + wm->pipe[pipe].plane[PLANE_SPRITE0], + wm->pipe[pipe].plane[PLANE_SPRITE1]); DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); -- cgit v1.2.3 From 26cca0e5f32418a03c1c87bd5d5813612052b1c3 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:09 +0200 Subject: drm/i915: Introduce vlv_invert_wm_value() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a small helper to do invert the vlv/chv values. Less fragile perhaps, and let's us clearly mark all overlarge wateramarks as disabled (by just making them all USHRT_MAX). 
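A standalone illustration of what the helper does (invented values): the computed watermark is inverted against the FIFO size before being written out, and anything that does not fit is saturated to USHRT_MAX, i.e. clearly marked disabled.

#include <stdio.h>
#include <limits.h>

typedef unsigned short u16;

static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}

int main(void)
{
	printf("%u\n", vlv_invert_wm_value(200, 511));	/* 311 */
	printf("%u\n", vlv_invert_wm_value(600, 511));	/* 65535: disabled */
	return 0;
}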
Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-8-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_pm.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0a3c26ab9205..a23253ce9199 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1053,6 +1053,14 @@ static void vlv_compute_fifo(struct intel_crtc *crtc) WARN_ON(fifo_left != 0); } +static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size) +{ + if (wm > fifo_size) + return USHRT_MAX; + else + return fifo_size - wm; +} + static void vlv_invert_wms(struct intel_crtc *crtc) { struct vlv_wm_state *wm_state = &crtc->wm_state; @@ -1064,12 +1072,17 @@ static void vlv_invert_wms(struct intel_crtc *crtc) INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1; struct intel_plane *plane; - wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane; - wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor; + wm_state->sr[level].plane = + vlv_invert_wm_value(wm_state->sr[level].plane, + sr_fifo_size); + wm_state->sr[level].cursor = + vlv_invert_wm_value(wm_state->sr[level].cursor, + 63); for_each_intel_plane_on_crtc(dev, crtc, plane) { - wm_state->wm[level].plane[plane->id] = plane->wm.fifo_size - - wm_state->wm[level].plane[plane->id]; + wm_state->wm[level].plane[plane->id] = + vlv_invert_wm_value(wm_state->wm[level].plane[plane->id], + plane->wm.fifo_size); } } } -- cgit v1.2.3 From 7c951c0025d10b1a568fd27754dc4b6384a50f2e Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:10 +0200 Subject: drm/i915: Pass around dev_priv in vlv wm functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Passing dev_priv instead of dev is the future. Let's make the vlv/chv wm functions respect that idea. 
Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-9-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_pm.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a23253ce9199..472315fd65a7 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1067,9 +1067,9 @@ static void vlv_invert_wms(struct intel_crtc *crtc) int level; for (level = 0; level < wm_state->num_levels; level++) { - struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const int sr_fifo_size = - INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1; + INTEL_INFO(dev_priv)->num_pipes * 512 - 1; struct intel_plane *plane; wm_state->sr[level].plane = @@ -1079,7 +1079,7 @@ static void vlv_invert_wms(struct intel_crtc *crtc) vlv_invert_wm_value(wm_state->sr[level].cursor, 63); - for_each_intel_plane_on_crtc(dev, crtc, plane) { + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { wm_state->wm[level].plane[plane->id] = vlv_invert_wm_value(wm_state->wm[level].plane[plane->id], plane->wm.fifo_size); @@ -1089,8 +1089,7 @@ static void vlv_invert_wms(struct intel_crtc *crtc) static void vlv_compute_wm(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct vlv_wm_state *wm_state = &crtc->wm_state; struct intel_plane *plane; int level; @@ -1107,7 +1106,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc) if (wm_state->num_active_planes != 1) wm_state->cxsr = false; - for_each_intel_plane_on_crtc(dev, crtc, plane) { + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { struct intel_plane_state *state = to_intel_plane_state(plane->base.state); int level; @@ -1253,16 +1252,16 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc) #undef VLV_FIFO -static void vlv_merge_wm(struct drm_device *dev, +static void vlv_merge_wm(struct drm_i915_private *dev_priv, struct vlv_wm_values *wm) { struct intel_crtc *crtc; int num_active_crtcs = 0; - wm->level = to_i915(dev)->wm.max_level; + wm->level = dev_priv->wm.max_level; wm->cxsr = true; - for_each_intel_crtc(dev, crtc) { + for_each_intel_crtc(&dev_priv->drm, crtc) { const struct vlv_wm_state *wm_state = &crtc->wm_state; if (!crtc->active) @@ -1281,7 +1280,7 @@ static void vlv_merge_wm(struct drm_device *dev, if (num_active_crtcs > 1) wm->level = VLV_WM_LEVEL_PM2; - for_each_intel_crtc(dev, crtc) { + for_each_intel_crtc(&dev_priv->drm, crtc) { struct vlv_wm_state *wm_state = &crtc->wm_state; enum pipe pipe = crtc->pipe; @@ -1301,13 +1300,12 @@ static void vlv_merge_wm(struct drm_device *dev, static void vlv_update_wm(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; struct vlv_wm_values wm = {}; vlv_compute_wm(crtc); - vlv_merge_wm(dev, &wm); + vlv_merge_wm(dev_priv, &wm); if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) { /* FIXME should be part of crtc atomic commit */ -- cgit v1.2.3 From 3d90e649fa3da5c6e5cb72700584689eb6260082 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:11 +0200 Subject: drm/i915: Protect cxsr state with wm_mutex 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's protect the cxsr state with the wm_mutex, since it might get poked from multiple places if there's a parallel plane update happening with a pipe getting enable/disabled. It's still pretty racy for the old platforms, but for vlv/chv it should work, I think. If not, we'll improve it later anyway. Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-10-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 2 -- drivers/gpu/drm/i915/intel_pm.c | 14 ++++++++++---- 2 files changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1f7d91af60cf..4c4882f91369 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5023,7 +5023,6 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) */ if (HAS_GMCH_DISPLAY(dev_priv)) { intel_set_memory_cxsr(dev_priv, false); - dev_priv->wm.vlv.cxsr = false; intel_wait_for_vblank(dev_priv, pipe); } } @@ -5102,7 +5101,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) */ if (old_crtc_state->base.active) { intel_set_memory_cxsr(dev_priv, false); - dev_priv->wm.vlv.cxsr = false; intel_wait_for_vblank(dev_priv, crtc->pipe); } } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 472315fd65a7..4a9e1f36d8ff 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -312,14 +312,13 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) #define FW_WM(value, plane) \ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) -void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) +static void _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { u32 val; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); POSTING_READ(FW_BLC_SELF_VLV); - dev_priv->wm.vlv.cxsr = enable; } else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) { I915_WRITE(FW_BLC_SELF, enable ? 
FW_BLC_SELF_EN : 0); POSTING_READ(FW_BLC_SELF); @@ -350,6 +349,13 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable)); } +void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) +{ + mutex_lock(&dev_priv->wm.wm_mutex); + _intel_set_memory_cxsr(dev_priv, enable); + dev_priv->wm.vlv.cxsr = enable; + mutex_unlock(&dev_priv->wm.wm_mutex); +} /* * Latency for FIFO fetches is dependent on several factors: @@ -1322,7 +1328,7 @@ static void vlv_update_wm(struct intel_crtc *crtc) chv_set_memory_pm5(dev_priv, false); if (!wm.cxsr && dev_priv->wm.vlv.cxsr) - intel_set_memory_cxsr(dev_priv, false); + _intel_set_memory_cxsr(dev_priv, false); /* FIXME should be part of crtc atomic commit */ vlv_pipe_set_fifo_size(crtc); @@ -1336,7 +1342,7 @@ static void vlv_update_wm(struct intel_crtc *crtc) wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr); if (wm.cxsr && !dev_priv->wm.vlv.cxsr) - intel_set_memory_cxsr(dev_priv, true); + _intel_set_memory_cxsr(dev_priv, true); if (wm.level >= VLV_WM_LEVEL_PM5 && dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5) -- cgit v1.2.3 From 11a85d6ab11440c87ff22bcdaa3b97dd5748ae14 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:12 +0200 Subject: drm/i915: Skip vblank wait if cxsr was already off MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before we attempt to turn any planes on or off we must first exit csxr. That's due to cxsr effectively making the plane enable bits read-only. Currently we achieve that with a vblank wait right after toggling the cxsr enable bit. We do the vblank wait even if cxsr was already off, which seems wasteful, so let's try to only do it when absolutely necessary. We could start tracking the cxsr state fully somewhere, but for now it seems easiest to just have intel_set_memory_cxsr() return the previous cxsr state. Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-11-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/intel_display.c | 10 ++++------ drivers/gpu/drm/i915/intel_pm.c | 31 ++++++++++++++++++++++++------- 3 files changed, 29 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3868eb59b908..74658acb307b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3575,7 +3575,7 @@ extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv); extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val); -extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, +extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); int i915_reg_read_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4c4882f91369..231f099bbd34 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5021,10 +5021,9 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) * event which is after the vblank start event, so we need to have a * wait-for-vblank between disabling the plane and the pipe. 
*/ - if (HAS_GMCH_DISPLAY(dev_priv)) { - intel_set_memory_cxsr(dev_priv, false); + if (HAS_GMCH_DISPLAY(dev_priv) && + intel_set_memory_cxsr(dev_priv, false)) intel_wait_for_vblank(dev_priv, pipe); - } } static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) @@ -5099,10 +5098,9 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) * event which is after the vblank start event, so we need to have a * wait-for-vblank between disabling the plane and the pipe. */ - if (old_crtc_state->base.active) { - intel_set_memory_cxsr(dev_priv, false); + if (old_crtc_state->base.active && + intel_set_memory_cxsr(dev_priv, false)) intel_wait_for_vblank(dev_priv, crtc->pipe); - } } /* diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 4a9e1f36d8ff..cb6b26b9134f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -312,22 +312,30 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) #define FW_WM(value, plane) \ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) -static void _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) +static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { + bool was_enabled; u32 val; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); POSTING_READ(FW_BLC_SELF_VLV); } else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) { + was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); POSTING_READ(FW_BLC_SELF); } else if (IS_PINEVIEW(dev_priv)) { - val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN; - val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0; + val = I915_READ(DSPFW3); + was_enabled = val & PINEVIEW_SELF_REFRESH_EN; + if (enable) + val |= PINEVIEW_SELF_REFRESH_EN; + else + val &= ~PINEVIEW_SELF_REFRESH_EN; I915_WRITE(DSPFW3, val); POSTING_READ(DSPFW3); } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) { + was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); I915_WRITE(FW_BLC_SELF, val); @@ -338,23 +346,32 @@ static void _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl * and yet it does have the related watermark in * FW_BLC_SELF. What's going on? */ + was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; val = enable ? 
_MASKED_BIT_ENABLE(INSTPM_SELF_EN) : _MASKED_BIT_DISABLE(INSTPM_SELF_EN); I915_WRITE(INSTPM, val); POSTING_READ(INSTPM); } else { - return; + return false; } - DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable)); + DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n", + enableddisabled(enable), + enableddisabled(was_enabled)); + + return was_enabled; } -void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) +bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { + bool ret; + mutex_lock(&dev_priv->wm.wm_mutex); - _intel_set_memory_cxsr(dev_priv, enable); + ret = _intel_set_memory_cxsr(dev_priv, enable); dev_priv->wm.vlv.cxsr = enable; mutex_unlock(&dev_priv->wm.wm_mutex); + + return ret; } /* -- cgit v1.2.3 From 6fe6a7ffd380d4a28c0086ec74751e9e738c01ed Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:14 +0200 Subject: drm/i915: Zero out HOWM registers before writing new WM/HOWM register values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On VLV/CHV some of the watermark values are split across two registers: low order bits in one, and high order bits in another. So we may not be able to update a single watermark value atomically, and thus we must be careful that we don't temporarily introduce out of bounds values during the reprogramming. To prevent this we can simply zero out all the high order bits initially, then we update the low order bits, and finally we update the high order bits with the final value. Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-13-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_pm.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index cb6b26b9134f..25a5c6787ea8 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -877,6 +877,17 @@ static void vlv_write_wm_values(struct intel_crtc *crtc, (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); + /* + * Zero the (unused) WM1 watermarks, and also clear all the + * high order bits so that there are no out of bounds values + * present in the registers during the reprogramming. + */ + I915_WRITE(DSPHOWM, 0); + I915_WRITE(DSPHOWM1, 0); + I915_WRITE(DSPFW4, 0); + I915_WRITE(DSPFW5, 0); + I915_WRITE(DSPFW6, 0); + I915_WRITE(DSPFW1, FW_WM(wm->sr.plane, SR) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | @@ -924,12 +935,6 @@ static void vlv_write_wm_values(struct intel_crtc *crtc, FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); } - /* zero (unused) WM1 watermarks */ - I915_WRITE(DSPFW4, 0); - I915_WRITE(DSPFW5, 0); - I915_WRITE(DSPFW6, 0); - I915_WRITE(DSPHOWM1, 0); - POSTING_READ(DSPFW1); } -- cgit v1.2.3 From 50f4caef8679f51d29f0238e7efa5daf64472b36 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:15 +0200 Subject: drm/i915: Write all DDL registers in one go MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We'll want to decouple the vlv/chv wm register reprogramming from any single pipe. So let's just write all the DDL registers in one go. 
We already write all the wm registers anyway since the bits are sprinkled all over the place and so writing them for just a single pipe would have been too messy anyway. Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-14-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_pm.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 25a5c6787ea8..08a1f881eea9 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -865,17 +865,18 @@ static bool g4x_compute_srwm(struct drm_i915_private *dev_priv, #define FW_WM_VLV(value, plane) \ (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) -static void vlv_write_wm_values(struct intel_crtc *crtc, +static void vlv_write_wm_values(struct drm_i915_private *dev_priv, const struct vlv_wm_values *wm) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; + enum pipe pipe; - I915_WRITE(VLV_DDL(pipe), - (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | - (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | - (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | - (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); + for_each_pipe(dev_priv, pipe) { + I915_WRITE(VLV_DDL(pipe), + (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | + (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | + (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | + (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); + } /* * Zero the (unused) WM1 watermarks, and also clear all the @@ -1355,7 +1356,7 @@ static void vlv_update_wm(struct intel_crtc *crtc) /* FIXME should be part of crtc atomic commit */ vlv_pipe_set_fifo_size(crtc); - vlv_write_wm_values(crtc, &wm); + vlv_write_wm_values(dev_priv, &wm); DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, " "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n", -- cgit v1.2.3 From fa292a4be9914a4f0dbfe9b2f8f1f8b5af942dc3 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:16 +0200 Subject: drm/i915: Clean up vlv_program_watermarks() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add small helpers to make the intent of the staggered enable/disable sequence in vlv_program_watermarks() easier on the eyes. 
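(A compilable toy version of the idea, not part of the patch: the two threshold-crossing helpers are copied from the diff below, while the level names and print statements merely stand in for the real VLV_WM_LEVEL_* handling, to show the staggered disable-before-write / enable-after-write ordering.)

#include <stdbool.h>
#include <stdio.h>

static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}

int main(void)
{
	enum { LEVEL_PM2, LEVEL_PM5, LEVEL_DDR_DVFS };
	int old = LEVEL_DDR_DVFS, new = LEVEL_PM2;

	/* features the new state no longer allows go off before the write */
	if (is_disabling(old, new, LEVEL_PM5))
		printf("disable PM5\n");
	printf("write new watermark registers\n");
	/* features the new state newly allows come on only afterwards */
	if (is_enabling(old, new, LEVEL_PM5))
		printf("enable PM5\n");
	return 0;
}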
Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-15-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_pm.c | 44 ++++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 08a1f881eea9..46ac7f5366d4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1327,55 +1327,63 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, } } +static bool is_disabling(int old, int new, int threshold) +{ + return old >= threshold && new < threshold; +} + +static bool is_enabling(int old, int new, int threshold) +{ + return old < threshold && new >= threshold; +} + static void vlv_update_wm(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - struct vlv_wm_values wm = {}; + struct vlv_wm_values *old_wm = &dev_priv->wm.vlv; + struct vlv_wm_values new_wm = {}; vlv_compute_wm(crtc); - vlv_merge_wm(dev_priv, &wm); + vlv_merge_wm(dev_priv, &new_wm); - if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) { + if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) { /* FIXME should be part of crtc atomic commit */ vlv_pipe_set_fifo_size(crtc); + return; } - if (wm.level < VLV_WM_LEVEL_DDR_DVFS && - dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS) + if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) chv_set_memory_dvfs(dev_priv, false); - if (wm.level < VLV_WM_LEVEL_PM5 && - dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5) + if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) chv_set_memory_pm5(dev_priv, false); - if (!wm.cxsr && dev_priv->wm.vlv.cxsr) + if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) _intel_set_memory_cxsr(dev_priv, false); /* FIXME should be part of crtc atomic commit */ vlv_pipe_set_fifo_size(crtc); - vlv_write_wm_values(dev_priv, &wm); + vlv_write_wm_values(dev_priv, &new_wm); DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, " "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n", - pipe_name(pipe), wm.pipe[pipe].plane[PLANE_PRIMARY], wm.pipe[pipe].plane[PLANE_CURSOR], - wm.pipe[pipe].plane[PLANE_SPRITE0], wm.pipe[pipe].plane[PLANE_SPRITE1], - wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr); + pipe_name(pipe), new_wm.pipe[pipe].plane[PLANE_PRIMARY], new_wm.pipe[pipe].plane[PLANE_CURSOR], + new_wm.pipe[pipe].plane[PLANE_SPRITE0], new_wm.pipe[pipe].plane[PLANE_SPRITE1], + new_wm.sr.plane, new_wm.sr.cursor, new_wm.level, new_wm.cxsr); - if (wm.cxsr && !dev_priv->wm.vlv.cxsr) + if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) _intel_set_memory_cxsr(dev_priv, true); - if (wm.level >= VLV_WM_LEVEL_PM5 && - dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5) + if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) chv_set_memory_pm5(dev_priv, true); - if (wm.level >= VLV_WM_LEVEL_DDR_DVFS && - dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS) + if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) chv_set_memory_dvfs(dev_priv, true); - dev_priv->wm.vlv = wm; + *old_wm = new_wm; } #define single_plane_enabled(mask) is_power_of_2(mask) -- cgit v1.2.3 From e339d67eeb0270d6520eb299655fec573409159c Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 28 Nov 2016 19:37:17 +0200 Subject: drm/i915: Pass crtc state to vlv_compute_wm_level() MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than accessing crtc->config in vlv_compute_wm_level() let's pass in the crtc state explicitly. One step closer to atomic. Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480354637-14209-16-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_pm.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 46ac7f5366d4..d414c870ce6d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -978,24 +978,26 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) } } -static uint16_t vlv_compute_wm_level(struct intel_plane *plane, - struct intel_crtc *crtc, - const struct intel_plane_state *state, +static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, int level) { + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; int clock, htotal, cpp, width, wm; if (dev_priv->wm.pri_latency[level] == 0) return USHRT_MAX; - if (!state->base.visible) + if (!plane_state->base.visible) return 0; - cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0); - clock = crtc->config->base.adjusted_mode.crtc_clock; - htotal = crtc->config->base.adjusted_mode.crtc_htotal; - width = crtc->config->pipe_src_w; + cpp = drm_format_plane_cpp(plane_state->base.fb->pixel_format, 0); + clock = adjusted_mode->crtc_clock; + htotal = adjusted_mode->crtc_htotal; + width = crtc_state->pipe_src_w; if (WARN_ON(htotal == 0)) htotal = 1; @@ -1145,7 +1147,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc) /* normal watermarks */ for (level = 0; level < wm_state->num_levels; level++) { - int wm = vlv_compute_wm_level(plane, crtc, state, level); + int wm = vlv_compute_wm_level(crtc->config, state, level); int max_wm = plane->wm.fifo_size; /* hack */ -- cgit v1.2.3 From 721d484563e1a51ada760089c490cbc47e909756 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 2 Dec 2016 15:29:04 +0100 Subject: drm/i915/dsi: Do not clear DPOUNIT_CLOCK_GATE_DISABLE from vlv_init_display_clock_gating MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On my Cherrytrail CUBE iwork8 Air tablet PIPE-A would get stuck on loading i915 at boot 1 out of every 3 boots, resulting in a non functional LCD. Once the i915 driver has successfully loaded, the panel can be disabled / enabled without hitting this issue. The getting stuck is caused by vlv_init_display_clock_gating() clearing the DPOUNIT_CLOCK_GATE_DISABLE bit in DSPCLK_GATE_D when called from chv_pipe_power_well_ops.enable() on driver load, while a pipe is enabled driving the DSI LCD by the BIOS. Clearing this bit while DSI is in use is a known issue and intel_dsi_pre_enable() / intel_dsi_post_disable() already set / clear it as appropriate. This commit modifies vlv_init_display_clock_gating() to leave the DPOUNIT_CLOCK_GATE_DISABLE bit alone fixing the pipe getting stuck. 
Changes in v2: -Replace PIPE-A with "a pipe" or "the pipe" in the commit msg and comment Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=97330 Cc: stable@vger.kernel.org Signed-off-by: Hans de Goede Reviewed-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20161202142904.25613-1-hdegoede@redhat.com Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/intel_runtime_pm.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index fb10ee630d2e..c0b7e95b5b8e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -1099,7 +1099,18 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) { - I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); + u32 val; + + /* + * On driver load, a pipe may be active and driving a DSI display. + * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck + * (and never recovering) in this case. intel_dsi_post_disable() will + * clear it when we turn off the display. + */ + val = I915_READ(DSPCLK_GATE_D); + val &= DPOUNIT_CLOCK_GATE_DISABLE; + val |= VRHUNIT_CLOCK_GATE_DISABLE; + I915_WRITE(DSPCLK_GATE_D, val); /* * Disable trickle feed and enable pnd deadline calculation -- cgit v1.2.3 From 85fd4f58d7efb7bb7ec577eb00dc2c3f2457a452 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 5 Dec 2016 14:29:36 +0000 Subject: drm/i915: Mark all non-vma being inserted into the address spaces We need to distinguish between full i915_vma structs and simple drm_mm_nodes when considering eviction (i.e. we must be careful not to treat a mere drm_mm_node as a much larger i915_vma causing memory corruption, if we are lucky). To do this, color these not-a-vma with -1 (I915_COLOR_UNEVICTABLE). v2...v200: New name for -1. 
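(An illustrative, compilable toy example, not from the driver: only a node that really is embedded in the larger structure may be converted back with container_of(), so bare nodes carry a sentinel colour that any walker must check first. All structure and function names below are invented for the example.)

#include <stddef.h>
#include <stdio.h>

#define COLOR_UNEVICTABLE (-1L)	/* toy stand-in for I915_COLOR_UNEVICTABLE */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct node { long color; };
struct vma { int pin_count; struct node node; };

static void try_evict(struct node *n)
{
	if (n->color == COLOR_UNEVICTABLE) {
		/* a bare node: treating it as a vma would read garbage */
		printf("bare node: cannot evict\n");
		return;
	}
	printf("vma pin_count=%d\n", container_of(n, struct vma, node)->pin_count);
}

int main(void)
{
	struct node bare = { .color = COLOR_UNEVICTABLE };
	struct vma v = { .pin_count = 2, .node = { .color = 0 } };

	try_evict(&bare);
	try_evict(&v.node);
	return 0;
}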
Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20161205142941.21965-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gvt/aperture_gm.c | 7 +++++-- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_gem.c | 3 ++- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 11 ++++++----- 5 files changed, 16 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 0d41ebc4aea6..7d33b607bc89 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -73,12 +73,15 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) mutex_lock(&dev_priv->drm.struct_mutex); search_again: ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm, - node, size, 4096, 0, + node, size, 4096, + I915_COLOR_UNEVICTABLE, start, end, search_flag, alloc_flag); if (ret) { ret = i915_gem_evict_something(&dev_priv->ggtt.base, - size, 4096, 0, start, end, 0); + size, 4096, + I915_COLOR_UNEVICTABLE, + start, end, 0); if (ret == 0 && ++retried < 3) goto search_again; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 74658acb307b..a0c67916ec20 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -934,6 +934,8 @@ enum i915_cache_level { I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */ }; +#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ + #define DEFAULT_CONTEXT_HANDLE 0 /** diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 59065ae0b153..ca0bb837a57f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -70,7 +70,8 @@ insert_mappable_node(struct i915_ggtt *ggtt, { memset(node, 0, sizeof(*node)); return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node, - size, 0, -1, + size, 0, + I915_COLOR_UNEVICTABLE, 0, ggtt->mappable_end, DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 985142cc6cc9..5f164adc837c 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -437,7 +437,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj, memset(&cache->node, 0, sizeof(cache->node)); ret = drm_mm_insert_node_in_range_generic (&ggtt->base.mm, &cache->node, - 4096, 0, 0, + 4096, 0, I915_COLOR_UNEVICTABLE, 0, ggtt->mappable_end, DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index c0042471ad87..88ddca24afdb 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2071,15 +2071,15 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) return ret; alloc: - ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, - &ppgtt->node, GEN6_PD_SIZE, - GEN6_PD_ALIGN, 0, + ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, &ppgtt->node, + GEN6_PD_SIZE, GEN6_PD_ALIGN, + I915_COLOR_UNEVICTABLE, 0, ggtt->base.total, DRM_MM_TOPDOWN); if (ret == -ENOSPC && !retried) { ret = i915_gem_evict_something(&ggtt->base, GEN6_PD_SIZE, GEN6_PD_ALIGN, - I915_CACHE_NONE, + I915_COLOR_UNEVICTABLE, 0, ggtt->base.total, 0); if (ret) @@ -2755,7 +2755,8 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) /* Reserve a mappable slot 
for our lockless error capture */ ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, &ggtt->error_capture, - 4096, 0, -1, + 4096, 0, + I915_COLOR_UNEVICTABLE, 0, ggtt->mappable_end, 0, 0); if (ret) -- cgit v1.2.3 From 172ae5b4c8c104af56804bf2406014849ed6fa37 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 5 Dec 2016 14:29:37 +0000 Subject: drm/i915: Fix i915_gem_evict_for_vma (soft-pinning) Soft-pinning depends upon being able to check for availability of an interval and evict overlapping objects from a drm_mm range manager very quickly. Currently it uses a linear list, and so performance is dire and not suitable as a general replacement. Worse, the current code will oops if it tries to evict an active buffer. It also helps if the routine reports the correct error codes as expected by its callers and emits a tracepoint upon use. For posterity since the wrong patch was pushed (i.e. that missed these key points and had known bugs), this is the changelog that should have been on commit 506a8e87d8d2 ("drm/i915: Add soft-pinning API for execbuffer"): Userspace can pass in an offset that it presumes the object is located at. The kernel will then do its utmost to fit the object into that location. The assumption is that userspace is handling its own object locations (for example along with full-ppgtt) and that the kernel will rarely have to make space for the user's requests. This extends the DRM_IOCTL_I915_GEM_EXECBUFFER2 to do the following: * if the user supplies a virtual address via the execobject->offset *and* sets the EXEC_OBJECT_PINNED flag in execobject->flags, then that object is placed at that offset in the address space selected by the context specifier in execbuffer. * the location must be aligned to the GTT page size, 4096 bytes * as the object is placed exactly as specified, it may be used by this execbuffer call without relocations pointing to it It may fail to do so if: * EINVAL is returned if the object does not have a 4096 byte aligned address * the object conflicts with another pinned object (either pinned by hardware in that address space, e.g. scanouts in the aliasing ppgtt) or within the same batch. EBUSY is returned if the location is pinned by hardware EINVAL is returned if the location is already in use by the batch * EINVAL is returned if the object conflicts with its own alignment (as meets the hardware requirements) or if the placement of the object does not fit within the address space All other execbuffer errors apply. Presence of this execbuf extension may be queried by passing I915_PARAM_HAS_EXEC_SOFTPIN to DRM_IOCTL_I915_GETPARAM and checking for a reported value of 1 (or greater). v2: Combine the hole/adjusted-hole ENOSPC checks v3: More color, more splitting, more blurb.
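(Since the changelog above describes the execbuf extension only in prose, here is a rough userspace sketch of how a caller might use it, assuming the libdrm/i915 uapi headers, an already-open i915 DRM fd and an existing GEM handle; batch contents, ring selection and error handling are omitted.)

#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static bool has_softpin(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_EXEC_SOFTPIN,
		.value = &value,
	};

	return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && value > 0;
}

/* Ask the kernel to place @handle exactly at @offset (which must be
 * 4096-byte aligned) in the context's address space, instead of
 * letting it pick a slot.  In a real submission this object would be
 * the batch buffer and would need valid contents. */
static int exec_softpinned(int fd, uint32_t handle, uint64_t offset)
{
	struct drm_i915_gem_exec_object2 obj = {
		.handle = handle,
		.offset = offset,
		.flags = EXEC_OBJECT_PINNED,
	};
	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr = (uintptr_t)&obj,
		.buffer_count = 1,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}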
Fixes: 506a8e87d8d2 ("drm/i915: Add soft-pinning API for execbuffer") Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20161205142941.21965-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.h | 3 +- drivers/gpu/drm/i915/i915_gem_evict.c | 104 ++++++++++++++++++++++------- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 1 + drivers/gpu/drm/i915/i915_trace.h | 28 ++++++++ drivers/gpu/drm/i915/i915_vma.c | 2 +- 5 files changed, 111 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a0c67916ec20..605247baa7d1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3341,7 +3341,8 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm, unsigned cache_level, u64 start, u64 end, unsigned flags); -int __must_check i915_gem_evict_for_vma(struct i915_vma *target); +int __must_check i915_gem_evict_for_vma(struct i915_vma *vma, + unsigned int flags); int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); /* belongs in i915_gem_gtt.h */ diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 739ede7c89ed..fa40100146ea 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -212,45 +212,99 @@ found: return ret; } -int -i915_gem_evict_for_vma(struct i915_vma *target) +/** + * i915_gem_evict_for_vma - Evict vmas to make room for binding a new one + * @target: address space and range to evict for + * @flags: additional flags to control the eviction algorithm + * + * This function will try to evict vmas that overlap the target node. + * + * To clarify: This is for freeing up virtual address space, not for freeing + * memory in e.g. the shrinker. + */ +int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags) { - struct drm_mm_node *node, *next; + LIST_HEAD(eviction_list); + struct drm_mm_node *node; + u64 start = target->node.start; + u64 end = start + target->node.size; + struct i915_vma *vma, *next; + bool check_color; + int ret = 0; lockdep_assert_held(&target->vm->i915->drm.struct_mutex); + trace_i915_gem_evict_vma(target, flags); + + check_color = target->vm->mm.color_adjust; + if (check_color) { + /* Expand search to cover neighbouring guard pages (or lack!) */ + if (start > target->vm->start) + start -= 4096; + if (end < target->vm->start + target->vm->total) + end += 4096; + } - list_for_each_entry_safe(node, next, - &target->vm->mm.head_node.node_list, - node_list) { - struct i915_vma *vma; - int ret; - - if (node->start + node->size <= target->node.start) - continue; - if (node->start >= target->node.start + target->node.size) + drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) { + /* If we find any non-objects (!vma), we cannot evict them */ + if (node->color == I915_COLOR_UNEVICTABLE) { + ret = -ENOSPC; break; + } vma = container_of(node, typeof(*vma), node); - if (i915_vma_is_pinned(vma)) { - if (!vma->exec_entry || i915_vma_pin_count(vma) > 1) - /* Object is pinned for some other use */ - return -EBUSY; + /* If we are using coloring to insert guard pages between + * different cache domains within the address space, we have + * to check whether the objects on either side of our range + * abutt and conflict. If they are in conflict, then we evict + * those as well to make room for our guard pages. 
+ */ + if (check_color) { + if (vma->node.start + vma->node.size == target->node.start) { + if (vma->node.color == target->node.color) + continue; + } + if (vma->node.start == target->node.start + target->node.size) { + if (vma->node.color == target->node.color) + continue; + } + } - /* We need to evict a buffer in the same batch */ - if (vma->exec_entry->flags & EXEC_OBJECT_PINNED) - /* Overlapping fixed objects in the same batch */ - return -EINVAL; + if (flags & PIN_NONBLOCK && + (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) { + ret = -ENOSPC; + break; + } - return -ENOSPC; + /* Overlap of objects in the same batch? */ + if (i915_vma_is_pinned(vma)) { + ret = -ENOSPC; + if (vma->exec_entry && + vma->exec_entry->flags & EXEC_OBJECT_PINNED) + ret = -EINVAL; + break; } - ret = i915_vma_unbind(vma); - if (ret) - return ret; + /* Never show fear in the face of dragons! + * + * We cannot directly remove this node from within this + * iterator and as with i915_gem_evict_something() we employ + * the vma pin_count in order to prevent the action of + * unbinding one vma from freeing (by dropping its active + * reference) another in our eviction list. + */ + __i915_vma_pin(vma); + list_add(&vma->exec_list, &eviction_list); } - return 0; + list_for_each_entry_safe(vma, next, &eviction_list, exec_list) { + list_del_init(&vma->exec_list); + __i915_vma_unpin(vma); + if (ret == 0) + ret = i915_vma_unbind(vma); + } + + return ret; } /** diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 5f164adc837c..d665a33229bd 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -274,6 +274,7 @@ static void eb_destroy(struct eb_vmas *eb) exec_list); list_del_init(&vma->exec_list); i915_gem_execbuffer_unreserve_vma(vma); + vma->exec_entry = NULL; i915_vma_put(vma); } kfree(eb); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 240067705e1c..18ae37c411fd 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -450,6 +450,34 @@ TRACE_EVENT(i915_gem_evict_vm, TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm) ); +TRACE_EVENT(i915_gem_evict_vma, + TP_PROTO(struct i915_vma *vma, unsigned int flags), + TP_ARGS(vma, flags), + + TP_STRUCT__entry( + __field(u32, dev) + __field(struct i915_address_space *, vm) + __field(u64, start) + __field(u64, size) + __field(unsigned long, color) + __field(unsigned int, flags) + ), + + TP_fast_assign( + __entry->dev = vma->vm->i915->drm.primary->index; + __entry->vm = vma->vm; + __entry->start = vma->node.start; + __entry->size = vma->node.size; + __entry->color = vma->node.color; + __entry->flags = flags; + ), + + TP_printk("dev=%d, vm=%p, start=%llx size=%llx, color=%lx, flags=%x", + __entry->dev, __entry->vm, + __entry->start, __entry->size, + __entry->color, __entry->flags) +); + TRACE_EVENT(i915_gem_ring_sync_to, TP_PROTO(struct drm_i915_gem_request *to, struct drm_i915_gem_request *from), diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 4c91a68ecb6d..bc077e51b3e7 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -401,7 +401,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) vma->node.color = obj->cache_level; ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node); if (ret) { - ret = i915_gem_evict_for_vma(vma); + ret = i915_gem_evict_for_vma(vma, flags); if (ret == 0) ret = 
drm_mm_reserve_node(&vma->vm->mm, &vma->node); if (ret) -- cgit v1.2.3 From 7d1d9aea3ee0e3aa285431c0fcfb05be8f6ec5f4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 5 Dec 2016 14:29:38 +0000 Subject: drm/i915: Tidy i915_gem_valid_gtt_space() We can replace a couple of tests with an assertion that the passed in node is already allocated (as matches the existing call convention) and by a small bit of refactoring we can bring the line lengths to under 80cols. Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20161205142941.21965-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_vma.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index bc077e51b3e7..37c3eebe8316 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -297,10 +297,14 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) vma->flags &= ~I915_VMA_CAN_FENCE; } -bool i915_gem_valid_gtt_space(struct i915_vma *vma, - unsigned long cache_level) +static bool color_differs(struct drm_mm_node *node, unsigned long color) { - struct drm_mm_node *gtt_space = &vma->node; + return node->allocated && node->color != color; +} + +bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) +{ + struct drm_mm_node *node = &vma->node; struct drm_mm_node *other; /* @@ -313,18 +317,16 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, if (vma->vm->mm.color_adjust == NULL) return true; - if (!drm_mm_node_allocated(gtt_space)) - return true; - - if (list_empty(>t_space->node_list)) - return true; + /* Only valid to be called on an already inserted vma */ + GEM_BUG_ON(!drm_mm_node_allocated(node)); + GEM_BUG_ON(list_empty(&node->node_list)); - other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list); - if (other->allocated && !other->hole_follows && other->color != cache_level) + other = list_prev_entry(node, node_list); + if (color_differs(other, cache_level) && !other->hole_follows) return false; - other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list); - if (other->allocated && !gtt_space->hole_follows && other->color != cache_level) + other = list_next_entry(node, node_list); + if (color_differs(other, cache_level) && !node->hole_follows) return false; return true; -- cgit v1.2.3 From 778e23a9e05b7a59e9dbd3f9d9c9395f2f977fc7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 5 Dec 2016 14:29:39 +0000 Subject: drm/i915: Implement local atomic_state_free callback As we use debugobjects to track the lifetime of fences within our atomic state, we ideally want to mark those objects as freed along with their containers. This merits us hookin into config->funcs->atomic_state_free for this purpose. This allows us to enable debugobjects for sw-fences without triggering known issues. 
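(For reference, list_safe_reset_next(pos, n, member) from list.h simply recomputes the cached successor used by list_for_each_entry_safe(); the kernel-style fragment below is illustrative only, not the actual execlists loop, and reprioritise() is an invented stand-in.)

/*
 * list_for_each_entry_safe(pos, n, head, member) caches pos's successor
 * in n so the body may delete pos.  If the body instead rearranges the
 * list around pos, that cached successor can be stale for the intended
 * traversal order, so it is re-derived explicitly:
 */
list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
	reprioritise(dep);	/* may move entries within &dfs */

	/* equivalent to: p = list_next_entry(dep, dfs_link) */
	list_safe_reset_next(dep, p, dfs_link);
}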
Fixes: fc1584059d6c ("drm/i915: Integrate i915_sw_fence with debugobjects") Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161205142941.21965-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_display.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 231f099bbd34..a88c810dbf6b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -15994,6 +15994,17 @@ intel_user_framebuffer_create(struct drm_device *dev, return fb; } +static void intel_atomic_state_free(struct drm_atomic_state *state) +{ + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + + drm_atomic_state_default_release(state); + + i915_sw_fence_fini(&intel_state->commit_ready); + + kfree(state); +} + static const struct drm_mode_config_funcs intel_mode_funcs = { .fb_create = intel_user_framebuffer_create, .output_poll_changed = intel_fbdev_output_poll_changed, @@ -16001,6 +16012,7 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { .atomic_commit = intel_atomic_commit, .atomic_state_alloc = intel_atomic_state_alloc, .atomic_state_clear = intel_atomic_state_clear, + .atomic_state_free = intel_atomic_state_free, }; /** -- cgit v1.2.3 From bdb821ca44d753594ff65df4b7cfe05c5d908719 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 5 Dec 2016 14:29:40 +0000 Subject: drm/i915: Enable swfence debugobject support for i915.ko Only once the debugobject symbols are exported can we enable support for debugging swfences when i915 is built as a module. Requires commit 2617fdca3f68 ("lib/debugobjects: export for use in modules") Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20161205142941.21965-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Kconfig.debug | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index cc4ad576e6e9..597648c7a645 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -24,7 +24,7 @@ config DRM_I915_DEBUG select X86_MSR # used by igt/pm_rpm select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) select DRM_DEBUG_MM if DRM=y - select DRM_I915_SW_FENCE_DEBUG_OBJECTS if DRM_I915=y + select DRM_I915_SW_FENCE_DEBUG_OBJECTS default n help Choose this option to turn on extra driver debugging that may affect @@ -48,7 +48,7 @@ config DRM_I915_DEBUG_GEM config DRM_I915_SW_FENCE_DEBUG_OBJECTS bool "Enable additional driver debugging for fence objects" - depends on DRM_I915=y + depends on DRM_I915 select DEBUG_OBJECTS default n help -- cgit v1.2.3 From 0798cff46bb2fc9ac24d5d465216db235a8f11f6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 5 Dec 2016 14:29:41 +0000 Subject: drm/i915/execlists: Use list_safe_reset_next() instead of opencoding list.h provides a macro for updating the next element in a safe list-iter, so let's use it so that it is hopefully clearer to the reader about the unusual behaviour, and also easier to grep. 
Signed-off-by: Chris Wilson Reviewed-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20161205142941.21965-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_lrc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 67aec8f33c1d..8b412880e88c 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -733,7 +733,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) if (prio > READ_ONCE(p->signaler->priority)) list_move_tail(&p->dfs_link, &dfs); - p = list_next_entry(dep, dfs_link); + list_safe_reset_next(dep, p, dfs_link); if (!RB_EMPTY_NODE(&pt->node)) continue; -- cgit v1.2.3 From 4cfa20c8345f04f0aa3ab5e33aa4487bbbcd8d9e Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Fri, 2 Dec 2016 18:47:50 +0000 Subject: drm/i915: allow GEM_BUG_ON expr checking with !DEBUG_GEM Use BUILD_BUG_ON_INVALID(expr) in GEM_BUG_ON when building without DEBUG_GEM. This means the compiler can now check the validity of expr without generating any code, in turn preventing us from inadvertently breaking the build when DEBUG_GEM is not enabled. Cc: Chris Wilson Signed-off-by: Matthew Auld Link: http://patchwork.freedesktop.org/patch/msgid/20161202184750.3843-1-matthew.auld@intel.com Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 51ec793f2e20..8801a14a78a2 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -28,7 +28,7 @@ #ifdef CONFIG_DRM_I915_DEBUG_GEM #define GEM_BUG_ON(expr) BUG_ON(expr) #else -#define GEM_BUG_ON(expr) do { } while (0) +#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) #endif #define I915_NUM_ENGINES 5 -- cgit v1.2.3 From d637c17832bd020c27b3c29c0bd27275d1af9132 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 6 Dec 2016 12:40:51 +0000 Subject: drm/i915: Use memcpy_from_wc for GPU error capture On all platforms we now always read the contents of buffers via the GTT, i.e. using WC cpu access. Reads are slow, but they can be accelerated with an internal read buffer using sse4.1 (movntqda). This is our i915_memcpy_from_wc() routine which also checks for sse4.1 support and so we can fallback to using a regular slow memcpy if we need to. When compressing the pages, the reads are currently done inside zlib's fill_window() routine and so we must copy the page into a temporary which is then already inside the CPU cache and fast for zlib's compression. When not compressing the pages, we don't need a temporary and can just use the accelerated read from WC into the destination. v2: Use zstream locals to reduce diff and allocate the additional temporary storage only if sse4.1 is supported. 
v3: Use length=0 for the sse4.1 support check Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161206124051.17040-1-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_gpu_error.c | 50 ++++++++++++++++++++++++++--------- 1 file changed, 37 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index a14f7badc337..307999b852fd 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -176,9 +176,14 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e, #ifdef CONFIG_DRM_I915_COMPRESS_ERROR -static bool compress_init(struct z_stream_s *zstream) +struct compress { + struct z_stream_s zstream; + void *tmp; +}; + +static bool compress_init(struct compress *c) { - memset(zstream, 0, sizeof(*zstream)); + struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream)); zstream->workspace = kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), @@ -191,14 +196,22 @@ static bool compress_init(struct z_stream_s *zstream) return false; } + c->tmp = NULL; + if (i915_memcpy_from_wc(NULL, 0, 0)) + c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN); + return true; } -static int compress_page(struct z_stream_s *zstream, +static int compress_page(struct compress *c, void *src, struct drm_i915_error_object *dst) { + struct z_stream_s *zstream = &c->zstream; + zstream->next_in = src; + if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) + zstream->next_in = c->tmp; zstream->avail_in = PAGE_SIZE; do { @@ -226,9 +239,11 @@ static int compress_page(struct z_stream_s *zstream, return 0; } -static void compress_fini(struct z_stream_s *zstream, +static void compress_fini(struct compress *c, struct drm_i915_error_object *dst) { + struct z_stream_s *zstream = &c->zstream; + if (dst) { zlib_deflate(zstream, Z_FINISH); dst->unused = zstream->avail_out; @@ -236,6 +251,9 @@ static void compress_fini(struct z_stream_s *zstream, zlib_deflateEnd(zstream); kfree(zstream->workspace); + + if (c->tmp) + free_page((unsigned long)c->tmp); } static void err_compression_marker(struct drm_i915_error_state_buf *m) @@ -245,28 +263,34 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) #else -static bool compress_init(struct z_stream_s *zstream) +struct compress { +}; + +static bool compress_init(struct compress *c) { return true; } -static int compress_page(struct z_stream_s *zstream, +static int compress_page(struct compress *c, void *src, struct drm_i915_error_object *dst) { unsigned long page; + void *ptr; page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) return -ENOMEM; - dst->pages[dst->page_count++] = - memcpy((void *)page, src, PAGE_SIZE); + ptr = (void *)page; + if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE)) + memcpy(ptr, src, PAGE_SIZE); + dst->pages[dst->page_count++] = ptr; return 0; } -static void compress_fini(struct z_stream_s *zstream, +static void compress_fini(struct compress *c, struct drm_i915_error_object *dst) { } @@ -784,7 +808,7 @@ i915_error_object_create(struct drm_i915_private *i915, struct i915_ggtt *ggtt = &i915->ggtt; const u64 slot = ggtt->error_capture.start; struct drm_i915_error_object *dst; - struct z_stream_s zstream; + struct compress compress; unsigned long num_pages; struct sgt_iter iter; dma_addr_t dma; @@ -804,7 +828,7 @@ i915_error_object_create(struct drm_i915_private *i915, dst->page_count = 0; dst->unused = 
0; - if (!compress_init(&zstream)) { + if (!compress_init(&compress)) { kfree(dst); return NULL; } @@ -817,7 +841,7 @@ i915_error_object_create(struct drm_i915_private *i915, I915_CACHE_NONE, 0); s = io_mapping_map_atomic_wc(&ggtt->mappable, slot); - ret = compress_page(&zstream, (void __force *)s, dst); + ret = compress_page(&compress, (void __force *)s, dst); io_mapping_unmap_atomic(s); if (ret) @@ -832,7 +856,7 @@ unwind: dst = NULL; out: - compress_fini(&zstream, dst); + compress_fini(&compress, dst); ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE); return dst; } -- cgit v1.2.3 From 9e1d0e604e0e6bd32a55c5c2780ef0687a86c4b6 Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Mon, 5 Dec 2016 17:57:03 -0800 Subject: drm/i915: Advertise ppgtt support type in platform definition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of being hidden in sanitize_enable_ppgtt. It also seems to be the place to do so nowadays. Signed-off-by: Michel Thierry Reviewed-by: Michał Winiarski Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_drv.h | 3 +++ drivers/gpu/drm/i915/i915_gem_gtt.c | 7 +++---- drivers/gpu/drm/i915/i915_pci.c | 11 +++++++++++ 3 files changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 605247baa7d1..88e70fe14cd1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -710,11 +710,14 @@ struct intel_csr { func(is_alpha_support); \ /* Keep has_* in alphabetical order */ \ func(has_64bit_reloc); \ + func(has_aliasing_ppgtt); \ func(has_csr); \ func(has_ddi); \ func(has_dp_mst); \ func(has_fbc); \ func(has_fpga_dbg); \ + func(has_full_ppgtt); \ + func(has_full_48bit_ppgtt); \ func(has_gmbus_irq); \ func(has_gmch_display); \ func(has_guc); \ diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 88ddca24afdb..ef00d36680c9 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -113,10 +113,9 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, bool has_full_ppgtt; bool has_full_48bit_ppgtt; - has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6; - has_full_ppgtt = INTEL_GEN(dev_priv) >= 7; - has_full_48bit_ppgtt = - IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9; + has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt; + has_full_ppgtt = dev_priv->info.has_full_ppgtt; + has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt; if (intel_vgpu_active(dev_priv)) { /* emulation is too hard */ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 389a33090707..d0b060a1ad7a 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -204,6 +204,7 @@ static const struct intel_device_info intel_ironlake_m_info = { .has_rc6p = 1, \ .has_gmbus_irq = 1, \ .has_hw_contexts = 1, \ + .has_aliasing_ppgtt = 1, \ GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS @@ -226,6 +227,8 @@ static const struct intel_device_info intel_sandybridge_m_info = { .has_rc6p = 1, \ .has_gmbus_irq = 1, \ .has_hw_contexts = 1, \ + .has_aliasing_ppgtt = 1, \ + .has_full_ppgtt = 1, \ GEN_DEFAULT_PIPEOFFSETS, \ IVB_CURSOR_OFFSETS @@ -258,6 +261,8 @@ static const struct intel_device_info intel_ivybridge_q_info = { .has_hw_contexts = 1, \ .has_gmch_display = 1, \ .has_hotplug = 1, \ + .has_aliasing_ppgtt = 1, \ + .has_full_ppgtt = 1, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ 
.display_mmio_offset = VLV_DISPLAY_BASE, \ GEN_DEFAULT_PIPEOFFSETS, \ @@ -289,6 +294,7 @@ static const struct intel_device_info intel_haswell_info = { HSW_FEATURES, \ BDW_COLORS, \ .has_logical_ring_contexts = 1, \ + .has_full_48bit_ppgtt = 1, \ .has_64bit_reloc = 1 static const struct intel_device_info intel_broadwell_info = { @@ -318,6 +324,8 @@ static const struct intel_device_info intel_cherryview_info = { .has_hw_contexts = 1, .has_logical_ring_contexts = 1, .has_gmch_display = 1, + .has_aliasing_ppgtt = 1, + .has_full_ppgtt = 1, .display_mmio_offset = VLV_DISPLAY_BASE, GEN_CHV_PIPEOFFSETS, CURSOR_OFFSETS, @@ -364,6 +372,9 @@ static const struct intel_device_info intel_skylake_gt3_info = { .has_logical_ring_contexts = 1, \ .has_guc = 1, \ .has_decoupled_mmio = 1, \ + .has_aliasing_ppgtt = 1, \ + .has_full_ppgtt = 1, \ + .has_full_48bit_ppgtt = 1, \ GEN_DEFAULT_PIPEOFFSETS, \ IVB_CURSOR_OFFSETS, \ BDW_COLORS -- cgit v1.2.3 From 70821af60b21cd15892350935c510c769ce80eee Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Mon, 5 Dec 2016 17:57:04 -0800 Subject: drm/i915: Keep has_* in alphabetical order MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As it already says in the comment block... Signed-off-by: Michel Thierry Reviewed-by: Michał Winiarski Signed-off-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/20161206015704.12654-1-michel.thierry@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 88e70fe14cd1..13835b98afa5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -713,6 +713,7 @@ struct intel_csr { func(has_aliasing_ppgtt); \ func(has_csr); \ func(has_ddi); \ + func(has_decoupled_mmio); \ func(has_dp_mst); \ func(has_fbc); \ func(has_fpga_dbg); \ @@ -738,8 +739,7 @@ struct intel_csr { func(cursor_needs_physical); \ func(hws_needs_physical); \ func(overlay_needs_physical); \ - func(supports_tv); \ - func(has_decoupled_mmio) + func(supports_tv); struct sseu_dev_info { u8 slice_mask; -- cgit v1.2.3 From c499af5a692fa39644ce7979b4dea5db794df78b Mon Sep 17 00:00:00 2001 From: Arkadiusz Hiler Date: Mon, 5 Dec 2016 17:04:29 +0100 Subject: drm/i915/guc: Drop comment on fwif autogeneration The firmware interface file was initially partially autogenerated, but this is no longer the case. It was never updated automatically, and a lot manual changes were introduced since. >From now on any changes to the firmware interface will be managed by hand, which gives us flexibility when it comes to structure reuse (HuC/GuC) and naming conventions. Cc: Anusha Srivatsa Cc: Jeff Mcgee Cc: Sagar A. Kamble Signed-off-by: Arkadiusz Hiler Reviewed-by: Jeff McGee Signed-off-by: Tvrtko Ursulin Link: http://patchwork.freedesktop.org/patch/msgid/1480953869-25267-1-git-send-email-arkadiusz.hiler@intel.com --- drivers/gpu/drm/i915/intel_guc_fwif.h | 9 --------- 1 file changed, 9 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 00ca0df50f02..3202b32b5638 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -23,15 +23,6 @@ #ifndef _INTEL_GUC_FWIF_H #define _INTEL_GUC_FWIF_H -/* - * This file is partially autogenerated, although currently with some manual - * fixups afterwards. 
In future, it should be entirely autogenerated, in order - * to ensure that the definitions herein remain in sync with those used by the - * GuC's own firmware. - * - * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST. - */ - #define GFXCORE_FAMILY_GEN9 12 #define GFXCORE_FAMILY_UNKNOWN 0x7fffffff -- cgit v1.2.3 From 2e0d26f866189f9035e379411e72fcedec678571 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 1 Dec 2016 14:49:55 +0200 Subject: drm/i915: replace platform flags with a platform enum The platform flags in device info are (mostly) mutually exclusive. Replace the flags with an enum. Add the platform enum also for platforms that previously didn't have a flag, and give them codename logging in dmesg. Pineview remains an exception, the platform being G33 for that. v2: Sort enum by gen and date v3: rebase on geminilake enabling Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/1480596595-3278-1-git-send-email-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 80 +++++++++++++++++++------------- drivers/gpu/drm/i915/i915_gpu_error.c | 1 + drivers/gpu/drm/i915/i915_pci.c | 57 +++++++++++++---------- drivers/gpu/drm/i915/intel_device_info.c | 41 +++++++++++++++- 5 files changed, 122 insertions(+), 58 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1b59d1251da8..95f7a5ef0e36 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -77,6 +77,7 @@ static int i915_capabilities(struct seq_file *m, void *data) const struct intel_device_info *info = INTEL_INFO(dev_priv); seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv)); + seq_printf(m, "platform: %s\n", intel_platform_name(info->platform)); seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv)); #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 13835b98afa5..dc59670160e1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -687,25 +687,8 @@ struct intel_csr { }; #define DEV_INFO_FOR_EACH_FLAG(func) \ - /* Keep is_* in chronological order */ \ func(is_mobile); \ - func(is_i85x); \ - func(is_i915g); \ - func(is_i945gm); \ - func(is_g33); \ - func(is_g4x); \ func(is_pineview); \ - func(is_broadwater); \ - func(is_crestline); \ - func(is_ivybridge); \ - func(is_valleyview); \ - func(is_cherryview); \ - func(is_haswell); \ - func(is_broadwell); \ - func(is_skylake); \ - func(is_broxton); \ - func(is_geminilake); \ - func(is_kabylake); \ func(is_lp); \ func(is_alpha_support); \ /* Keep has_* in alphabetical order */ \ @@ -759,6 +742,35 @@ static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu) return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask); } +/* Keep in gen based order, and chronological order within a gen */ +enum intel_platform { + INTEL_PLATFORM_UNINITIALIZED = 0, + INTEL_I830, + INTEL_I845G, + INTEL_I85X, + INTEL_I865G, + INTEL_I915G, + INTEL_I915GM, + INTEL_I945G, + INTEL_I945GM, + INTEL_G33, + INTEL_PINEVIEW, + INTEL_BROADWATER, + INTEL_CRESTLINE, + INTEL_G4X, + INTEL_IRONLAKE, + INTEL_SANDYBRIDGE, + INTEL_IVYBRIDGE, + INTEL_VALLEYVIEW, + INTEL_HASWELL, + INTEL_BROADWELL, + INTEL_CHERRYVIEW, + INTEL_SKYLAKE, + INTEL_BROXTON, + INTEL_KABYLAKE, + INTEL_GEMINILAKE, +}; + struct intel_device_info { u32 
display_mmio_offset; u16 device_id; @@ -766,6 +778,7 @@ struct intel_device_info { u8 num_sprites[I915_MAX_PIPES]; u8 gen; u16 gen_mask; + enum intel_platform platform; u8 ring_mask; /* Rings supported by the HW */ u8 num_rings; #define DEFINE_FLAG(name) u8 name:1 @@ -2503,33 +2516,33 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577) #define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562) -#define IS_I85X(dev_priv) ((dev_priv)->info.is_i85x) +#define IS_I85X(dev_priv) ((dev_priv)->info.platform == INTEL_I85X) #define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572) -#define IS_I915G(dev_priv) ((dev_priv)->info.is_i915g) +#define IS_I915G(dev_priv) ((dev_priv)->info.platform == INTEL_I915G) #define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592) #define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772) -#define IS_I945GM(dev_priv) ((dev_priv)->info.is_i945gm) -#define IS_BROADWATER(dev_priv) ((dev_priv)->info.is_broadwater) -#define IS_CRESTLINE(dev_priv) ((dev_priv)->info.is_crestline) +#define IS_I945GM(dev_priv) ((dev_priv)->info.platform == INTEL_I945GM) +#define IS_BROADWATER(dev_priv) ((dev_priv)->info.platform == INTEL_BROADWATER) +#define IS_CRESTLINE(dev_priv) ((dev_priv)->info.platform == INTEL_CRESTLINE) #define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42) -#define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x) +#define IS_G4X(dev_priv) ((dev_priv)->info.platform == INTEL_G4X) #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) #define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview) -#define IS_G33(dev_priv) ((dev_priv)->info.is_g33) +#define IS_G33(dev_priv) ((dev_priv)->info.platform == INTEL_G33) #define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046) -#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge) +#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.platform == INTEL_IVYBRIDGE) #define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \ INTEL_DEVID(dev_priv) == 0x0152 || \ INTEL_DEVID(dev_priv) == 0x015a) -#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview) -#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview) -#define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell) -#define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell) -#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake) -#define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton) -#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.is_geminilake) -#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake) +#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_VALLEYVIEW) +#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_CHERRYVIEW) +#define IS_HASWELL(dev_priv) ((dev_priv)->info.platform == INTEL_HASWELL) +#define IS_BROADWELL(dev_priv) ((dev_priv)->info.platform == INTEL_BROADWELL) +#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_SKYLAKE) +#define IS_BROXTON(dev_priv) ((dev_priv)->info.platform == INTEL_BROXTON) +#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_KABYLAKE) +#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE) #define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) @@ -3563,6 +3576,7 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) return (struct intel_device_info 
*)&dev_priv->info; } +const char *intel_platform_name(enum intel_platform platform); void intel_device_info_runtime_init(struct drm_i915_private *dev_priv); void intel_device_info_dump(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 307999b852fd..e16037d1b0ba 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -571,6 +571,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, } err_printf(m, "Reset count: %u\n", error->reset_count); err_printf(m, "Suspend count: %u\n", error->suspend_count); + err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform)); err_printf(m, "PCI ID: 0x%04x\n", pdev->device); err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision); err_printf(m, "PCI Subsystem: %04x:%04x\n", diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index d0b060a1ad7a..dd3e3170c7e9 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -65,17 +65,19 @@ static const struct intel_device_info intel_i830_info = { GEN2_FEATURES, + .platform = INTEL_I830, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, /* legal, last one wins */ }; static const struct intel_device_info intel_845g_info = { GEN2_FEATURES, + .platform = INTEL_I845G, }; static const struct intel_device_info intel_i85x_info = { GEN2_FEATURES, - .is_i85x = 1, .is_mobile = 1, + .platform = INTEL_I85X, .is_mobile = 1, .num_pipes = 2, /* legal, last one wins */ .cursor_needs_physical = 1, .has_fbc = 1, @@ -83,6 +85,7 @@ static const struct intel_device_info intel_i85x_info = { static const struct intel_device_info intel_i865g_info = { GEN2_FEATURES, + .platform = INTEL_I865G, }; #define GEN3_FEATURES \ @@ -94,12 +97,13 @@ static const struct intel_device_info intel_i865g_info = { static const struct intel_device_info intel_i915g_info = { GEN3_FEATURES, - .is_i915g = 1, .cursor_needs_physical = 1, + .platform = INTEL_I915G, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .hws_needs_physical = 1, }; static const struct intel_device_info intel_i915gm_info = { GEN3_FEATURES, + .platform = INTEL_I915GM, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, @@ -109,13 +113,14 @@ static const struct intel_device_info intel_i915gm_info = { }; static const struct intel_device_info intel_i945g_info = { GEN3_FEATURES, + .platform = INTEL_I945G, .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .hws_needs_physical = 1, }; static const struct intel_device_info intel_i945gm_info = { GEN3_FEATURES, - .is_i945gm = 1, .is_mobile = 1, + .platform = INTEL_I945GM, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .supports_tv = 1, @@ -133,14 +138,14 @@ static const struct intel_device_info intel_i945gm_info = { static const struct intel_device_info intel_i965g_info = { GEN4_FEATURES, - .is_broadwater = 1, + .platform = INTEL_BROADWATER, .has_overlay = 1, .hws_needs_physical = 1, }; static const struct intel_device_info intel_i965gm_info = { GEN4_FEATURES, - .is_crestline = 1, + .platform = INTEL_CRESTLINE, .is_mobile = 1, .has_fbc = 1, .has_overlay = 1, .supports_tv = 1, @@ -149,21 +154,21 @@ static const struct intel_device_info intel_i965gm_info = { static const struct intel_device_info intel_g33_info = { GEN3_FEATURES, - .is_g33 = 1, + .platform = 
INTEL_G33, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_g45_info = { GEN4_FEATURES, - .is_g4x = 1, + .platform = INTEL_G4X, .has_pipe_cxsr = 1, .ring_mask = RENDER_RING | BSD_RING, }; static const struct intel_device_info intel_gm45_info = { GEN4_FEATURES, - .is_g4x = 1, + .platform = INTEL_G4X, .is_mobile = 1, .has_fbc = 1, .has_pipe_cxsr = 1, .supports_tv = 1, @@ -172,7 +177,7 @@ static const struct intel_device_info intel_gm45_info = { static const struct intel_device_info intel_pineview_info = { GEN3_FEATURES, - .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, + .platform = INTEL_G33, .is_pineview = 1, .is_mobile = 1, .has_hotplug = 1, .has_overlay = 1, }; @@ -187,10 +192,12 @@ static const struct intel_device_info intel_pineview_info = { static const struct intel_device_info intel_ironlake_d_info = { GEN5_FEATURES, + .platform = INTEL_IRONLAKE, }; static const struct intel_device_info intel_ironlake_m_info = { GEN5_FEATURES, + .platform = INTEL_IRONLAKE, .is_mobile = 1, }; @@ -210,10 +217,12 @@ static const struct intel_device_info intel_ironlake_m_info = { static const struct intel_device_info intel_sandybridge_d_info = { GEN6_FEATURES, + .platform = INTEL_SANDYBRIDGE, }; static const struct intel_device_info intel_sandybridge_m_info = { GEN6_FEATURES, + .platform = INTEL_SANDYBRIDGE, .is_mobile = 1, }; @@ -234,20 +243,20 @@ static const struct intel_device_info intel_sandybridge_m_info = { static const struct intel_device_info intel_ivybridge_d_info = { GEN7_FEATURES, - .is_ivybridge = 1, + .platform = INTEL_IVYBRIDGE, .has_l3_dpf = 1, }; static const struct intel_device_info intel_ivybridge_m_info = { GEN7_FEATURES, - .is_ivybridge = 1, + .platform = INTEL_IVYBRIDGE, .is_mobile = 1, .has_l3_dpf = 1, }; static const struct intel_device_info intel_ivybridge_q_info = { GEN7_FEATURES, - .is_ivybridge = 1, + .platform = INTEL_IVYBRIDGE, .num_pipes = 0, /* legal, last one wins */ .has_l3_dpf = 1, }; @@ -270,7 +279,7 @@ static const struct intel_device_info intel_ivybridge_q_info = { static const struct intel_device_info intel_valleyview_info = { VLV_FEATURES, - .is_valleyview = 1, + .platform = INTEL_VALLEYVIEW, }; #define HSW_FEATURES \ @@ -286,7 +295,7 @@ static const struct intel_device_info intel_valleyview_info = { static const struct intel_device_info intel_haswell_info = { HSW_FEATURES, - .is_haswell = 1, + .platform = INTEL_HASWELL, .has_l3_dpf = 1, }; @@ -300,13 +309,13 @@ static const struct intel_device_info intel_haswell_info = { static const struct intel_device_info intel_broadwell_info = { BDW_FEATURES, .gen = 8, - .is_broadwell = 1, + .platform = INTEL_BROADWELL, }; static const struct intel_device_info intel_broadwell_gt3_info = { BDW_FEATURES, .gen = 8, - .is_broadwell = 1, + .platform = INTEL_BROADWELL, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; @@ -314,7 +323,7 @@ static const struct intel_device_info intel_cherryview_info = { .gen = 8, .num_pipes = 3, .has_hotplug = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, - .is_cherryview = 1, + .platform = INTEL_CHERRYVIEW, .has_64bit_reloc = 1, .has_psr = 1, .has_runtime_pm = 1, @@ -334,7 +343,7 @@ static const struct intel_device_info intel_cherryview_info = { static const struct intel_device_info intel_skylake_info = { BDW_FEATURES, - .is_skylake = 1, + .platform = INTEL_SKYLAKE, .gen = 9, .has_csr = 1, .has_guc = 1, @@ -343,7 +352,7 @@ static const struct intel_device_info intel_skylake_info = { static const struct intel_device_info 
intel_skylake_gt3_info = { BDW_FEATURES, - .is_skylake = 1, + .platform = INTEL_SKYLAKE, .gen = 9, .has_csr = 1, .has_guc = 1, @@ -380,21 +389,21 @@ static const struct intel_device_info intel_skylake_gt3_info = { BDW_COLORS static const struct intel_device_info intel_broxton_info = { - .is_broxton = 1, GEN9_LP_FEATURES, + .platform = INTEL_BROXTON, .ddb_size = 512, }; static const struct intel_device_info intel_geminilake_info = { - .is_alpha_support = 1, - .is_geminilake = 1, GEN9_LP_FEATURES, + .platform = INTEL_GEMINILAKE, + .is_alpha_support = 1, .ddb_size = 1024, }; static const struct intel_device_info intel_kabylake_info = { BDW_FEATURES, - .is_kabylake = 1, + .platform = INTEL_KABYLAKE, .gen = 9, .has_csr = 1, .has_guc = 1, @@ -403,7 +412,7 @@ static const struct intel_device_info intel_kabylake_info = { static const struct intel_device_info intel_kabylake_gt3_info = { BDW_FEATURES, - .is_kabylake = 1, + .platform = INTEL_KABYLAKE, .gen = 9, .has_csr = 1, .has_guc = 1, diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 602d7610ec79..23b040743a6c 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -24,11 +24,50 @@ #include "i915_drv.h" +#define PLATFORM_NAME(x) [INTEL_##x] = #x +static const char * const platform_names[] = { + PLATFORM_NAME(I830), + PLATFORM_NAME(I845G), + PLATFORM_NAME(I85X), + PLATFORM_NAME(I865G), + PLATFORM_NAME(I915G), + PLATFORM_NAME(I915GM), + PLATFORM_NAME(I945G), + PLATFORM_NAME(I945GM), + PLATFORM_NAME(G33), + PLATFORM_NAME(PINEVIEW), + PLATFORM_NAME(BROADWATER), + PLATFORM_NAME(CRESTLINE), + PLATFORM_NAME(G4X), + PLATFORM_NAME(IRONLAKE), + PLATFORM_NAME(SANDYBRIDGE), + PLATFORM_NAME(IVYBRIDGE), + PLATFORM_NAME(VALLEYVIEW), + PLATFORM_NAME(HASWELL), + PLATFORM_NAME(BROADWELL), + PLATFORM_NAME(CHERRYVIEW), + PLATFORM_NAME(SKYLAKE), + PLATFORM_NAME(BROXTON), + PLATFORM_NAME(KABYLAKE), + PLATFORM_NAME(GEMINILAKE), +}; +#undef PLATFORM_NAME + +const char *intel_platform_name(enum intel_platform platform) +{ + if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) || + platform_names[platform] == NULL)) + return ""; + + return platform_names[platform]; +} + void intel_device_info_dump(struct drm_i915_private *dev_priv) { const struct intel_device_info *info = &dev_priv->info; - DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x", + DRM_DEBUG_DRIVER("i915 device info: platform=%s gen=%i pciid=0x%04x rev=0x%02x", + intel_platform_name(info->platform), info->gen, dev_priv->drm.pdev->device, dev_priv->drm.pdev->revision); -- cgit v1.2.3 From a5ce929bc3ca3057588fadecc2305f17378ce8ab Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 30 Nov 2016 17:43:02 +0200 Subject: drm/i915: keep intel device info structs in gen based order Move G33 and Pineview higher up in the list. Add a couple of blank lines for OCD while at it. 
Reviewed-by: Joonas Lahtinen Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/ef4cc8e6ddf592c8c2769b84d4b88a5422d46ea5.1480520526.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_pci.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index dd3e3170c7e9..c0bcf323dbf0 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -54,6 +54,7 @@ #define CHV_COLORS \ .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } +/* Keep in gen based order, and chronological order within a gen */ #define GEN2_FEATURES \ .gen = 2, .num_pipes = 1, \ .has_overlay = 1, .overlay_needs_physical = 1, \ @@ -101,6 +102,7 @@ static const struct intel_device_info intel_i915g_info = { .has_overlay = 1, .overlay_needs_physical = 1, .hws_needs_physical = 1, }; + static const struct intel_device_info intel_i915gm_info = { GEN3_FEATURES, .platform = INTEL_I915GM, @@ -111,6 +113,7 @@ static const struct intel_device_info intel_i915gm_info = { .has_fbc = 1, .hws_needs_physical = 1, }; + static const struct intel_device_info intel_i945g_info = { GEN3_FEATURES, .platform = INTEL_I945G, @@ -118,6 +121,7 @@ static const struct intel_device_info intel_i945g_info = { .has_overlay = 1, .overlay_needs_physical = 1, .hws_needs_physical = 1, }; + static const struct intel_device_info intel_i945gm_info = { GEN3_FEATURES, .platform = INTEL_I945GM, .is_mobile = 1, @@ -128,6 +132,20 @@ static const struct intel_device_info intel_i945gm_info = { .hws_needs_physical = 1, }; +static const struct intel_device_info intel_g33_info = { + GEN3_FEATURES, + .platform = INTEL_G33, + .has_hotplug = 1, + .has_overlay = 1, +}; + +static const struct intel_device_info intel_pineview_info = { + GEN3_FEATURES, + .platform = INTEL_G33, .is_pineview = 1, .is_mobile = 1, + .has_hotplug = 1, + .has_overlay = 1, +}; + #define GEN4_FEATURES \ .gen = 4, .num_pipes = 2, \ .has_hotplug = 1, \ @@ -152,13 +170,6 @@ static const struct intel_device_info intel_i965gm_info = { .hws_needs_physical = 1, }; -static const struct intel_device_info intel_g33_info = { - GEN3_FEATURES, - .platform = INTEL_G33, - .has_hotplug = 1, - .has_overlay = 1, -}; - static const struct intel_device_info intel_g45_info = { GEN4_FEATURES, .platform = INTEL_G4X, @@ -175,13 +186,6 @@ static const struct intel_device_info intel_gm45_info = { .ring_mask = RENDER_RING | BSD_RING, }; -static const struct intel_device_info intel_pineview_info = { - GEN3_FEATURES, - .platform = INTEL_G33, .is_pineview = 1, .is_mobile = 1, - .has_hotplug = 1, - .has_overlay = 1, -}; - #define GEN5_FEATURES \ .gen = 5, .num_pipes = 2, \ .has_hotplug = 1, \ -- cgit v1.2.3 From c0f86832e3fd0d125f22bf26040d24837d0412db Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 7 Dec 2016 12:13:04 +0200 Subject: drm/i915: rename BROADWATER and CRESTLINE to I965G and I965GM, respectively Add more consistency to our naming. Pineview remains the outlier. Keep using code names for gen5+. 
v2: rebased Reviewed-by: Joonas Lahtinen Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/1481105584-23033-1-git-send-email-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 8 ++++---- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_gem_internal.c | 2 +- drivers/gpu/drm/i915/i915_pci.c | 4 ++-- drivers/gpu/drm/i915/intel_device_info.c | 4 ++-- drivers/gpu/drm/i915/intel_display.c | 8 ++++---- drivers/gpu/drm/i915/intel_pm.c | 6 +++--- 9 files changed, 19 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 95f7a5ef0e36..00a36bf87993 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1734,7 +1734,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) if (HAS_PCH_SPLIT(dev_priv)) sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; - else if (IS_CRESTLINE(dev_priv) || IS_G4X(dev_priv) || + else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) || IS_I945G(dev_priv) || IS_I945GM(dev_priv)) sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; else if (IS_I915GM(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ae583c79c19f..1a7ac2eefe97 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1033,7 +1033,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) * behaviour if any general state is accessed within a page above 4GB, * which also needs to be handled carefully. */ - if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv)) { + if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) { ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index dc59670160e1..e5465b330886 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -755,8 +755,8 @@ enum intel_platform { INTEL_I945GM, INTEL_G33, INTEL_PINEVIEW, - INTEL_BROADWATER, - INTEL_CRESTLINE, + INTEL_I965G, + INTEL_I965GM, INTEL_G4X, INTEL_IRONLAKE, INTEL_SANDYBRIDGE, @@ -2522,8 +2522,8 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592) #define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772) #define IS_I945GM(dev_priv) ((dev_priv)->info.platform == INTEL_I945GM) -#define IS_BROADWATER(dev_priv) ((dev_priv)->info.platform == INTEL_BROADWATER) -#define IS_CRESTLINE(dev_priv) ((dev_priv)->info.platform == INTEL_CRESTLINE) +#define IS_I965G(dev_priv) ((dev_priv)->info.platform == INTEL_I965G) +#define IS_I965GM(dev_priv) ((dev_priv)->info.platform == INTEL_I965GM) #define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42) #define IS_G4X(dev_priv) ((dev_priv)->info.platform == INTEL_G4X) #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ca0bb837a57f..dd1a34ac830f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3999,7 +3999,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) goto fail; mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; - if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) { + if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) { /* 965gm cannot relocate objects above 4GiB. 
*/ mask &= ~__GFP_HIGHMEM; mask |= __GFP_DMA32; diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c index 08d26306d40e..2222863e505f 100644 --- a/drivers/gpu/drm/i915/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/i915_gem_internal.c @@ -71,7 +71,7 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) #endif gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; - if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) { + if (IS_I965GM(i915) || IS_I965G(i915)) { /* 965gm cannot relocate objects above 4GiB. */ gfp &= ~__GFP_HIGHMEM; gfp |= __GFP_DMA32; diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index c0bcf323dbf0..99e8eed0a1fc 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -156,14 +156,14 @@ static const struct intel_device_info intel_pineview_info = { static const struct intel_device_info intel_i965g_info = { GEN4_FEATURES, - .platform = INTEL_BROADWATER, + .platform = INTEL_I965G, .has_overlay = 1, .hws_needs_physical = 1, }; static const struct intel_device_info intel_i965gm_info = { GEN4_FEATURES, - .platform = INTEL_CRESTLINE, + .platform = INTEL_I965GM, .is_mobile = 1, .has_fbc = 1, .has_overlay = 1, .supports_tv = 1, diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 23b040743a6c..6f4cd4fca957 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -36,8 +36,8 @@ static const char * const platform_names[] = { PLATFORM_NAME(I945GM), PLATFORM_NAME(G33), PLATFORM_NAME(PINEVIEW), - PLATFORM_NAME(BROADWATER), - PLATFORM_NAME(CRESTLINE), + PLATFORM_NAME(I965G), + PLATFORM_NAME(I965GM), PLATFORM_NAME(G4X), PLATFORM_NAME(IRONLAKE), PLATFORM_NAME(SANDYBRIDGE), diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a88c810dbf6b..ab5ba7e08424 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2150,7 +2150,7 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr { if (INTEL_INFO(dev_priv)->gen >= 9) return 256 * 1024; - else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) || + else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) return 128 * 1024; else if (INTEL_INFO(dev_priv)->gen >= 4) @@ -7568,7 +7568,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) vco_table = ctg_vco; else if (IS_G4X(dev_priv)) vco_table = elk_vco; - else if (IS_CRESTLINE(dev_priv)) + else if (IS_I965GM(dev_priv)) vco_table = cl_vco; else if (IS_PINEVIEW(dev_priv)) vco_table = pnv_vco; @@ -16108,14 +16108,14 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) else if (IS_GEN5(dev_priv)) dev_priv->display.get_display_clock_speed = ilk_get_display_clock_speed; - else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) || + else if (IS_I945G(dev_priv) || IS_I965G(dev_priv) || IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) dev_priv->display.get_display_clock_speed = i945_get_display_clock_speed; else if (IS_GM45(dev_priv)) dev_priv->display.get_display_clock_speed = gm45_get_display_clock_speed; - else if (IS_CRESTLINE(dev_priv)) + else if (IS_I965GM(dev_priv)) dev_priv->display.get_display_clock_speed = i965gm_get_display_clock_speed; else if (IS_PINEVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d414c870ce6d..c6fe59944a0b 100644 --- 
a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -321,7 +321,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); POSTING_READ(FW_BLC_SELF_VLV); - } else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) { + } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) { was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); POSTING_READ(FW_BLC_SELF); @@ -7643,9 +7643,9 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) dev_priv->display.init_clock_gating = ironlake_init_clock_gating; else if (IS_G4X(dev_priv)) dev_priv->display.init_clock_gating = g4x_init_clock_gating; - else if (IS_CRESTLINE(dev_priv)) + else if (IS_I965GM(dev_priv)) dev_priv->display.init_clock_gating = crestline_init_clock_gating; - else if (IS_BROADWATER(dev_priv)) + else if (IS_I965G(dev_priv)) dev_priv->display.init_clock_gating = broadwater_init_clock_gating; else if (IS_GEN3(dev_priv)) dev_priv->display.init_clock_gating = gen3_init_clock_gating; -- cgit v1.2.3 From 2a307c2e91b9478a2a06c09ffcbfc00851c7be26 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 30 Nov 2016 17:43:04 +0200 Subject: drm/i915: add some more "i" in platform names for consistency Consistency FTW. Reviewed-by: Joonas Lahtinen Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/9ab811dc06570bd3fc05a917ade1bdc9bb805a75.1480520526.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- drivers/gpu/drm/i915/i915_gem_stolen.c | 2 +- drivers/gpu/drm/i915/i915_pci.c | 4 ++-- drivers/gpu/drm/i915/intel_display.c | 14 +++++++------- drivers/gpu/drm/i915/intel_i2c.c | 2 +- drivers/gpu/drm/i915/intel_overlay.c | 4 ++-- drivers/gpu/drm/i915/intel_ringbuffer.c | 4 ++-- 8 files changed, 18 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 00a36bf87993..a746130c8e32 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2955,7 +2955,7 @@ static bool cursor_active(struct drm_i915_private *dev_priv, int pipe) { u32 state; - if (IS_845G(dev_priv) || IS_I865G(dev_priv)) + if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; else state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e5465b330886..e2a99649d594 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2515,7 +2515,7 @@ intel_info(const struct drm_i915_private *dev_priv) (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) #define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577) -#define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562) +#define IS_I845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562) #define IS_I85X(dev_priv) ((dev_priv)->info.platform == INTEL_I85X) #define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572) #define IS_I915G(dev_priv) ((dev_priv)->info.platform == INTEL_I915G) @@ -2667,7 +2667,7 @@ intel_info(const struct drm_i915_private *dev_priv) ((dev_priv)->info.overlay_needs_physical) /* Early gen2 have a totally busted CS tlb and require pinned batches. 
*/ -#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv)) +#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) /* WaRsDisableCoarsePowerGating:skl,bxt */ #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index b3bac2557665..c81b22f238c3 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -153,7 +153,7 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv) tom = tmp * MB(32); base = tom - tseg_size - ggtt->stolen_size; - } else if (IS_845G(dev_priv)) { + } else if (IS_I845G(dev_priv)) { u32 tseg_size = 0; u32 tom; u8 tmp; diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 99e8eed0a1fc..2bf9b75ac392 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -71,7 +71,7 @@ static const struct intel_device_info intel_i830_info = { .num_pipes = 2, /* legal, last one wins */ }; -static const struct intel_device_info intel_845g_info = { +static const struct intel_device_info intel_i845g_info = { GEN2_FEATURES, .platform = INTEL_I845G, }; @@ -432,7 +432,7 @@ static const struct intel_device_info intel_kabylake_gt3_info = { */ static const struct pci_device_id pciidlist[] = { INTEL_I830_IDS(&intel_i830_info), - INTEL_I845G_IDS(&intel_845g_info), + INTEL_I845G_IDS(&intel_i845g_info), INTEL_I85X_IDS(&intel_i85x_info), INTEL_I865G_IDS(&intel_i865g_info), INTEL_I915G_IDS(&intel_i915g_info), diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ab5ba7e08424..a41082e2750e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1233,7 +1233,7 @@ static void assert_cursor(struct drm_i915_private *dev_priv, { bool cur_state; - if (IS_845G(dev_priv) || IS_I865G(dev_priv)) + if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; else cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; @@ -10936,7 +10936,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, I915_WRITE(CURPOS(pipe), pos); - if (IS_845G(dev_priv) || IS_I865G(dev_priv)) + if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) i845_update_cursor(crtc, base, plane_state); else i9xx_update_cursor(crtc, base, plane_state); @@ -10954,11 +10954,11 @@ static bool cursor_size_ok(struct drm_i915_private *dev_priv, * the precision of the register. Everything else requires * square cursors, limited to a few power-of-two sizes. */ - if (IS_845G(dev_priv) || IS_I865G(dev_priv)) { + if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { if ((width & 63) != 0) return false; - if (width > (IS_845G(dev_priv) ? 64 : 512)) + if (width > (IS_I845G(dev_priv) ? 64 : 512)) return false; if (height > 1023) @@ -16127,7 +16127,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) else if (IS_I915G(dev_priv)) dev_priv->display.get_display_clock_speed = i915_get_display_clock_speed; - else if (IS_I945GM(dev_priv) || IS_845G(dev_priv)) + else if (IS_I945GM(dev_priv) || IS_I845G(dev_priv)) dev_priv->display.get_display_clock_speed = i9xx_misc_get_display_clock_speed; else if (IS_I915GM(dev_priv)) @@ -16549,8 +16549,8 @@ int intel_modeset_init(struct drm_device *dev) dev->mode_config.max_height = 8192; } - if (IS_845G(dev_priv) || IS_I865G(dev_priv)) { - dev->mode_config.cursor_width = IS_845G(dev_priv) ? 
64 : 512; + if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { + dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512; dev->mode_config.cursor_height = 1023; } else if (IS_GEN2(dev_priv)) { dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 0164130c0488..bce1ba80f277 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -139,7 +139,7 @@ static u32 get_reserved(struct intel_gmbus *bus) u32 reserved = 0; /* On most chips, these bits must be preserved in software. */ - if (!IS_I830(dev_priv) && !IS_845G(dev_priv)) + if (!IS_I830(dev_priv) && !IS_I845G(dev_priv)) reserved = I915_READ_NOTRACE(bus->gpio_reg) & (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 90da6a707de7..354f8cec96bb 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -957,7 +957,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv, u32 tmp; /* check src dimensions */ - if (IS_845G(dev_priv) || IS_I830(dev_priv)) { + if (IS_I845G(dev_priv) || IS_I830(dev_priv)) { if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) return -EINVAL; @@ -1009,7 +1009,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv, return -EINVAL; /* stride checking */ - if (IS_I830(dev_priv) || IS_845G(dev_priv)) + if (IS_I830(dev_priv) || IS_I845G(dev_priv)) stride_mask = 255; else stride_mask = 63; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index bc18a4f2643d..0b7d13b6e228 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1912,7 +1912,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size) * of the buffer. */ ring->effective_size = size; - if (IS_I830(engine->i915) || IS_845G(engine->i915)) + if (IS_I830(engine->i915) || IS_I845G(engine->i915)) ring->effective_size -= 2 * CACHELINE_BYTES; ring->last_retired_head = -1; @@ -2608,7 +2608,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, engine->emit_bb_start = gen6_emit_bb_start; else if (INTEL_GEN(dev_priv) >= 4) engine->emit_bb_start = i965_emit_bb_start; - else if (IS_I830(dev_priv) || IS_845G(dev_priv)) + else if (IS_I830(dev_priv) || IS_I845G(dev_priv)) engine->emit_bb_start = i830_emit_bb_start; else engine->emit_bb_start = i915_emit_bb_start; -- cgit v1.2.3 From f69c11aeb872e51bf3900a6c0c8397f9ddb9f5bc Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 30 Nov 2016 17:43:05 +0200 Subject: drm/i915: give G45 and GM45 their own platform enums Distinguish them better. 
Reviewed-by: Joonas Lahtinen Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/987709804bc8fe55475e7481fcee03e7b86b1ba3.1480520526.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 8 +++++--- drivers/gpu/drm/i915/i915_pci.c | 4 ++-- drivers/gpu/drm/i915/intel_device_info.c | 3 ++- 3 files changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e2a99649d594..01e9f5b11f35 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -757,7 +757,8 @@ enum intel_platform { INTEL_PINEVIEW, INTEL_I965G, INTEL_I965GM, - INTEL_G4X, + INTEL_G45, + INTEL_GM45, INTEL_IRONLAKE, INTEL_SANDYBRIDGE, INTEL_IVYBRIDGE, @@ -2524,8 +2525,9 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_I945GM(dev_priv) ((dev_priv)->info.platform == INTEL_I945GM) #define IS_I965G(dev_priv) ((dev_priv)->info.platform == INTEL_I965G) #define IS_I965GM(dev_priv) ((dev_priv)->info.platform == INTEL_I965GM) -#define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42) -#define IS_G4X(dev_priv) ((dev_priv)->info.platform == INTEL_G4X) +#define IS_G45(dev_priv) ((dev_priv)->info.platform == INTEL_G45) +#define IS_GM45(dev_priv) ((dev_priv)->info.platform == INTEL_GM45) +#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv)) #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) #define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 2bf9b75ac392..f7ec6e944e09 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -172,14 +172,14 @@ static const struct intel_device_info intel_i965gm_info = { static const struct intel_device_info intel_g45_info = { GEN4_FEATURES, - .platform = INTEL_G4X, + .platform = INTEL_G45, .has_pipe_cxsr = 1, .ring_mask = RENDER_RING | BSD_RING, }; static const struct intel_device_info intel_gm45_info = { GEN4_FEATURES, - .platform = INTEL_G4X, + .platform = INTEL_GM45, .is_mobile = 1, .has_fbc = 1, .has_pipe_cxsr = 1, .supports_tv = 1, diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 6f4cd4fca957..c46415b8c1b9 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -38,7 +38,8 @@ static const char * const platform_names[] = { PLATFORM_NAME(PINEVIEW), PLATFORM_NAME(I965G), PLATFORM_NAME(I965GM), - PLATFORM_NAME(G4X), + PLATFORM_NAME(G45), + PLATFORM_NAME(GM45), PLATFORM_NAME(IRONLAKE), PLATFORM_NAME(SANDYBRIDGE), PLATFORM_NAME(IVYBRIDGE), -- cgit v1.2.3 From 06bcd84873da324a69505cb489f61da2832d87cb Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 30 Nov 2016 17:43:06 +0200 Subject: drm/i915: use platform enum instead of duplicating PCI ID if possible Duplicating the PCI ID for IS_FOO checks is redundant for a bunch of platforms. Simplify. 
Reviewed-by: Joonas Lahtinen Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/4f79321aca2e003a627ba8b6809af3716b7c25c9.1480520526.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 01e9f5b11f35..8daa4fb13b52 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2515,13 +2515,13 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_REVID(p, since, until) \ (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) -#define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577) -#define IS_I845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562) +#define IS_I830(dev_priv) ((dev_priv)->info.platform == INTEL_I830) +#define IS_I845G(dev_priv) ((dev_priv)->info.platform == INTEL_I845G) #define IS_I85X(dev_priv) ((dev_priv)->info.platform == INTEL_I85X) -#define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572) +#define IS_I865G(dev_priv) ((dev_priv)->info.platform == INTEL_I865G) #define IS_I915G(dev_priv) ((dev_priv)->info.platform == INTEL_I915G) -#define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592) -#define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772) +#define IS_I915GM(dev_priv) ((dev_priv)->info.platform == INTEL_I915GM) +#define IS_I945G(dev_priv) ((dev_priv)->info.platform == INTEL_I945G) #define IS_I945GM(dev_priv) ((dev_priv)->info.platform == INTEL_I945GM) #define IS_I965G(dev_priv) ((dev_priv)->info.platform == INTEL_I965G) #define IS_I965GM(dev_priv) ((dev_priv)->info.platform == INTEL_I965GM) -- cgit v1.2.3 From 467a14d935c6d95b94e4197bf63e264eab4c5d06 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 5 Dec 2016 16:13:28 +0200 Subject: drm/i915: Protect DSPARB registers with a spinlock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Each DSPARB register can house bits for two separate pipes, hence we must protect the registers during reprogramming so that parallel FIFO reconfigurations happening simultaneously on multiple pipes won't corrupt each other's values. We'll use a new spinlock for this instead of the wm_mutex since we'll have to move the DSPARB programming to happen from the vblank evade critical section, and we can't use mutexes in there.
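As a rough illustration of the race being closed here (a userspace analogy rather than the driver code; fake_dsparb, fifo_lock, set_pipe_fifo and the example field values are all made up), two pipes whose FIFO fields share one word have to serialize their read-modify-write, otherwise each update can wipe out the other pipe's bits:

  /* build with: cc -pthread rmw.c */
  #include <pthread.h>
  #include <stdint.h>
  #include <stdio.h>

  static uint32_t fake_dsparb;           /* stand-in for one register shared by two pipes */
  static pthread_spinlock_t fifo_lock;   /* plays the role of the new dsparb_lock */

  /* read-modify-write of one pipe's 16-bit field, serialized by the lock */
  static void set_pipe_fifo(unsigned int shift, uint32_t val)
  {
          pthread_spin_lock(&fifo_lock);
          fake_dsparb = (fake_dsparb & ~(0xffffu << shift)) | (val << shift);
          pthread_spin_unlock(&fifo_lock);
  }

  static void *pipe_a(void *arg) { (void)arg; set_pipe_fifo(0, 0x1234); return NULL; }
  static void *pipe_b(void *arg) { (void)arg; set_pipe_fifo(16, 0xabcd); return NULL; }

  int main(void)
  {
          pthread_t a, b;

          pthread_spin_init(&fifo_lock, PTHREAD_PROCESS_PRIVATE);
          pthread_create(&a, NULL, pipe_a, NULL);
          pthread_create(&b, NULL, pipe_b, NULL);
          pthread_join(a, NULL);
          pthread_join(b, NULL);
          printf("0x%08x\n", (unsigned)fake_dsparb);  /* 0xabcd1234: both fields intact */
          return 0;
  }

In the driver the same pattern is spin_lock(&dev_priv->wm.dsparb_lock) around the DSPARB read-modify-write, with a POSTING_READ before dropping the lock, as the diff below shows.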
v2: Document why we use a spinlock instead of a mutex (Maarten) Cc: Maarten Lankhorst Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/1480947208-18468-1-git-send-email-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 3 +++ drivers/gpu/drm/i915/intel_pm.c | 6 ++++++ 3 files changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 1a7ac2eefe97..5fe27e42d81b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -801,6 +801,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, spin_lock_init(&dev_priv->uncore.lock); spin_lock_init(&dev_priv->mm.object_stat_lock); spin_lock_init(&dev_priv->mmio_flip_lock); + spin_lock_init(&dev_priv->wm.dsparb_lock); mutex_init(&dev_priv->sb_lock); mutex_init(&dev_priv->modeset_restore_lock); mutex_init(&dev_priv->av_mutex); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8daa4fb13b52..33758ac5ec9a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2191,6 +2191,9 @@ struct drm_i915_private { } sagv_status; struct { + /* protects DSPARB registers on pre-g4x/vlv/chv */ + spinlock_t dsparb_lock; + /* * Raw watermark latency values: * in 0.1us units for WM0, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index c6fe59944a0b..3ea7cf275be0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1223,6 +1223,8 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc) pipe_name(crtc->pipe), sprite0_start, sprite1_start, fifo_size); + spin_lock(&dev_priv->wm.dsparb_lock); + switch (crtc->pipe) { uint32_t dsparb, dsparb2, dsparb3; case PIPE_A: @@ -1279,6 +1281,10 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc) default: break; } + + POSTING_READ(DSPARB); + + spin_unlock(&dev_priv->wm.dsparb_lock); } #undef VLV_FIFO -- cgit v1.2.3 From 7708550ce5794d76dce5be608a9a91353c1bb98b Mon Sep 17 00:00:00 2001 From: Robert Bragg Date: Thu, 1 Dec 2016 17:21:52 +0000 Subject: drm/i915/perf: use DRM_DEBUG for userspace issues Avoid using DRM_ERROR for conditions userspace can trigger with a bad config when opening a stream or from not reading data in a timely fashion (whereby the OA buffer fills up). These conditions are tested by i-g-t which treats error messages as failures if using the test runner. This wasn't an issue while the i915-perf igt tests were being run in isolation. One message relating to seeing a spurious zeroed report was changed to use DRM_NOTE instead of DRM_ERROR. Ideally this warning shouldn't be seen, but it's not a serious problem if it is. Considering that the tail margin mechanism is only a heuristic it's possible we might see this from time to time. Signed-off-by: Robert Bragg Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161201172152.10893-1-robert@sixbynine.org --- drivers/gpu/drm/i915/i915_perf.c | 42 ++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 5669f0862458..0c6e12428194 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -474,7 +474,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, * copying it to userspace... 
*/ if (report32[0] == 0) { - DRM_ERROR("Skipping spurious, invalid OA report\n"); + DRM_NOTE("Skipping spurious, invalid OA report\n"); continue; } @@ -551,7 +551,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream, if (ret) return ret; - DRM_ERROR("OA buffer overflow: force restart\n"); + DRM_DEBUG("OA buffer overflow: force restart\n"); dev_priv->perf.oa.ops.oa_disable(dev_priv); dev_priv->perf.oa.ops.oa_enable(dev_priv); @@ -1000,17 +1000,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, * IDs */ if (!dev_priv->perf.metrics_kobj) { - DRM_ERROR("OA metrics weren't advertised via sysfs\n"); + DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); return -EINVAL; } if (!(props->sample_flags & SAMPLE_OA_REPORT)) { - DRM_ERROR("Only OA report sampling supported\n"); + DRM_DEBUG("Only OA report sampling supported\n"); return -EINVAL; } if (!dev_priv->perf.oa.ops.init_oa_buffer) { - DRM_ERROR("OA unit not supported\n"); + DRM_DEBUG("OA unit not supported\n"); return -ENODEV; } @@ -1019,17 +1019,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, * we currently only allow exclusive access */ if (dev_priv->perf.oa.exclusive_stream) { - DRM_ERROR("OA unit already in use\n"); + DRM_DEBUG("OA unit already in use\n"); return -EBUSY; } if (!props->metrics_set) { - DRM_ERROR("OA metric set not specified\n"); + DRM_DEBUG("OA metric set not specified\n"); return -EINVAL; } if (!props->oa_format) { - DRM_ERROR("OA report format not specified\n"); + DRM_DEBUG("OA report format not specified\n"); return -EINVAL; } @@ -1385,7 +1385,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, if (IS_ERR(specific_ctx)) { ret = PTR_ERR(specific_ctx); if (ret != -EINTR) - DRM_ERROR("Failed to look up context with ID %u for opening perf stream\n", + DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n", ctx_handle); goto err; } @@ -1398,7 +1398,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, */ if (!specific_ctx && i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { - DRM_ERROR("Insufficient privileges to open system-wide i915 perf stream\n"); + DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n"); ret = -EACCES; goto err_ctx; } @@ -1477,7 +1477,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, memset(props, 0, sizeof(struct perf_open_properties)); if (!n_props) { - DRM_ERROR("No i915 perf properties given"); + DRM_DEBUG("No i915 perf properties given\n"); return -EINVAL; } @@ -1488,7 +1488,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, * from userspace. 
*/ if (n_props >= DRM_I915_PERF_PROP_MAX) { - DRM_ERROR("More i915 perf properties specified than exist"); + DRM_DEBUG("More i915 perf properties specified than exist\n"); return -EINVAL; } @@ -1516,26 +1516,26 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, case DRM_I915_PERF_PROP_OA_METRICS_SET: if (value == 0 || value > dev_priv->perf.oa.n_builtin_sets) { - DRM_ERROR("Unknown OA metric set ID"); + DRM_DEBUG("Unknown OA metric set ID\n"); return -EINVAL; } props->metrics_set = value; break; case DRM_I915_PERF_PROP_OA_FORMAT: if (value == 0 || value >= I915_OA_FORMAT_MAX) { - DRM_ERROR("Invalid OA report format\n"); + DRM_DEBUG("Invalid OA report format\n"); return -EINVAL; } if (!dev_priv->perf.oa.oa_formats[value].size) { - DRM_ERROR("Invalid OA report format\n"); + DRM_DEBUG("Invalid OA report format\n"); return -EINVAL; } props->oa_format = value; break; case DRM_I915_PERF_PROP_OA_EXPONENT: if (value > OA_EXPONENT_MAX) { - DRM_ERROR("OA timer exponent too high (> %u)\n", - OA_EXPONENT_MAX); + DRM_DEBUG("OA timer exponent too high (> %u)\n", + OA_EXPONENT_MAX); return -EINVAL; } @@ -1566,7 +1566,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, if (oa_freq_hz > i915_oa_max_sample_rate && !capable(CAP_SYS_ADMIN)) { - DRM_ERROR("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n", + DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n", i915_oa_max_sample_rate); return -EACCES; } @@ -1576,7 +1576,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, break; default: MISSING_CASE(id); - DRM_ERROR("Unknown i915 perf property ID"); + DRM_DEBUG("Unknown i915 perf property ID\n"); return -EINVAL; } @@ -1596,7 +1596,7 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data, int ret; if (!dev_priv->perf.initialized) { - DRM_ERROR("i915 perf interface not available for this system"); + DRM_DEBUG("i915 perf interface not available for this system\n"); return -ENOTSUPP; } @@ -1604,7 +1604,7 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data, I915_PERF_FLAG_FD_NONBLOCK | I915_PERF_FLAG_DISABLED; if (param->flags & ~known_open_flags) { - DRM_ERROR("Unknown drm_i915_perf_open_param flag\n"); + DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n"); return -EINVAL; } -- cgit v1.2.3 From 2b8208ac93be2783edc627fc02d9ca50cc479923 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 2 Dec 2016 16:01:28 +0100 Subject: drm/i915/dsi: Fix swapping of MIPI_SEQ_DEASSERT_RESET / MIPI_SEQ_ASSERT_RESET MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Looking at the ADF code from the Android kernel sources for a cherrytrail tablet I noticed that it is calling the MIPI_SEQ_ASSERT_RESET sequence from the panel prepare hook. Until commit b1cb1bd29189 ("drm/i915/dsi: update reset and power sequences in panel prepare/unprepare hooks") the mainline i915 code was doing the same. That commits effectively swaps the calling of MIPI_SEQ_ASSERT_RESET / MIPI_SEQ_DEASSERT_RESET. Looking at the naming of the sequences that is the right thing to do, but the problem is, that the old mainline code and the ADF code was actually calling the right sequence (tested on a cube iwork8 air tablet), and the swapping of the calling breaks things. 
This breakage was likely not noticed in testing because on cherrytrail, currently chv_exec_gpio ends up disabling the gpio pins rather then setting them (this is fixed in the next patch in this patch-set). This commit fixes the swapping by fixing MIPI_SEQ_ASSERT/DEASSERT_RESET's places in the enum defining them, so that their (new) names match their actual use. Changes in v2: -Add a comment to the enum explaining that the assert/reassert names are swapped in the spec Fixes: b1cb1bd29189 ("drm/i915/dsi: update reset and power sequences...") Cc: Jani Nikula Cc: Ville Syrjälä Signed-off-by: Hans de Goede Acked-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20161202150128.29871-1-hdegoede@redhat.com Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/intel_bios.h | 12 +++++++++--- drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 4 ++-- 2 files changed, 11 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 8405b5a367d7..7e3545f65257 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -46,14 +46,20 @@ struct edp_power_seq { u16 t11_t12; } __packed; -/* MIPI Sequence Block definitions */ +/* + * MIPI Sequence Block definitions + * + * Note the VBT spec has AssertReset / DeassertReset swapped from their + * usual naming, we use the proper names here to avoid confusion when + * reading the code. + */ enum mipi_seq { MIPI_SEQ_END = 0, - MIPI_SEQ_ASSERT_RESET, + MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */ MIPI_SEQ_INIT_OTP, MIPI_SEQ_DISPLAY_ON, MIPI_SEQ_DISPLAY_OFF, - MIPI_SEQ_DEASSERT_RESET, + MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */ MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */ MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */ MIPI_SEQ_TEAR_ON, /* sequence block v2+ */ diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index 0d8ff0034b88..579d2f570717 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -376,11 +376,11 @@ static const fn_mipi_elem_exec exec_elem[] = { */ static const char * const seq_name[] = { - [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET", + [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET", [MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP", [MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON", [MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF", - [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET", + [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET", [MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON", [MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF", [MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON", -- cgit v1.2.3 From b2b45fcd921e864a5e9bbc7aa55dee96d5e11c06 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 1 Dec 2016 21:29:09 +0100 Subject: drm/i915/dsi: Fix chv_exec_gpio disabling the GPIOs it is setting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Set the CHV_GPIO_GPIOEN bit when updating GPIOs from chv_exec_gpio. 
Fixes: a0a6d4ffd2ad ("drm/i915/dsi: add support for gpio elements on CHV") Cc: stable@vger.kernel.org Cc: Jani Nikula Cc: Ville Syrjälä Signed-off-by: Hans de Goede Reviewed-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20161201202925.12220-3-hdegoede@redhat.com Signed-off-by: Ville Syrjälä --- drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index 579d2f570717..47cd1b20fb3e 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -300,7 +300,8 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv, mutex_lock(&dev_priv->sb_lock); vlv_iosf_sb_write(dev_priv, port, cfg1, 0); vlv_iosf_sb_write(dev_priv, port, cfg0, - CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value)); + CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO | + CHV_GPIO_GPIOTXSTATE(value)); mutex_unlock(&dev_priv->sb_lock); } -- cgit v1.2.3 From ef8a4fb479d3791cc40a1adb72d4b5ec655fddaf Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Thu, 1 Dec 2016 21:19:33 +0530 Subject: drm/i915/skl: Add variables to check x_tile and y_tile This patch adds variable to check for X_tiled & y_tiled planes, instead of always checking against framebuffer-modifiers. Changes: - Created separate patch as per Paulo's comment - Added x_tiled variable as well Changes since V2: - Incorporate Paulo's comments - Rebase Changes since V3 (from Paulo): - Bikeshed indentation Reviewed-by: Paulo Zanoni Signed-off-by: Mahesh Kumar Signed-off-by: Paulo Zanoni Link: http://patchwork.freedesktop.org/patch/msgid/20161201154940.24446-2-mahesh1.kumar@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 3ea7cf275be0..4db68459fd18 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3585,13 +3585,18 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, struct intel_atomic_state *state = to_intel_atomic_state(cstate->base.state); bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); + bool y_tiled, x_tiled; if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) { *enabled = false; return 0; } - if (apply_memory_bw_wa && fb->modifier == I915_FORMAT_MOD_X_TILED) + y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED || + fb->modifier == I915_FORMAT_MOD_Yf_TILED; + x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED; + + if (apply_memory_bw_wa && x_tiled) latency += 15; width = drm_rect_width(&intel_pstate->base.src) >> 16; @@ -3630,16 +3635,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, y_min_scanlines *= 2; plane_bytes_per_line = width * cpp; - if (fb->modifier == I915_FORMAT_MOD_Y_TILED || - fb->modifier == I915_FORMAT_MOD_Yf_TILED) { + if (y_tiled) { plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512); plane_blocks_per_line /= y_min_scanlines; - } else if (fb->modifier == DRM_FORMAT_MOD_NONE) { + } else if (x_tiled) { + plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); + } else { plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; - } else { - plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); } method1 = skl_wm_method1(plane_pixel_rate, cpp, latency); @@ -3650,8 +3654,7 @@ 
static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, y_tile_minimum = plane_blocks_per_line * y_min_scanlines; - if (fb->modifier == I915_FORMAT_MOD_Y_TILED || - fb->modifier == I915_FORMAT_MOD_Yf_TILED) { + if (y_tiled) { selected_result = max(method2, y_tile_minimum); } else { if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && @@ -3667,8 +3670,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line); if (level >= 1 && level <= 7) { - if (fb->modifier == I915_FORMAT_MOD_Y_TILED || - fb->modifier == I915_FORMAT_MOD_Yf_TILED) { + if (y_tiled) { res_blocks += y_tile_minimum; res_lines += y_min_scanlines; } else { -- cgit v1.2.3 From a3a8986cb22aafd83681b7e783ca6d4963712989 Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Thu, 1 Dec 2016 21:19:34 +0530 Subject: drm/i915/bxt: IPC WA for Broxton Display Workarounds #1135 If IPC is enabled in BXT, display underruns are observed. WA: The Line Time programmed in the WM_LINETIME register should be half of the actual calculated Line Time. Programmed Line Time = 1/2*Calculated Line Time Changes since V1: - Add Workaround number in commit & code Changes since V2 (from Paulo): - Bikeshed white space and make the WA tag look like the others Reviewed-by: Paulo Zanoni Signed-off-by: Mahesh Kumar Signed-off-by: Paulo Zanoni Link: http://patchwork.freedesktop.org/patch/msgid/20161201154940.24446-3-mahesh1.kumar@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 2 ++ drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/intel_pm.c | 13 +++++++++++-- 3 files changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 5fe27e42d81b..d2cc28f6953d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1239,6 +1239,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) intel_runtime_pm_enable(dev_priv); + dev_priv->ipc_enabled = false; + /* Everything is in place, we can now relax! */ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", driver.name, driver.major, driver.minor, driver.patchlevel, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 33758ac5ec9a..4eea9255dc53 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2329,6 +2329,8 @@ struct drm_i915_private { /* perform PHY state sanity checks? */ bool chv_phy_assert[2]; + bool ipc_enabled; + /* Used to save the pipe-to-encoder mapping for audio */ struct intel_encoder *av_enc_map[I915_MAX_PIPES]; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 4db68459fd18..9ea3eeed3385 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3761,7 +3761,10 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv, static uint32_t skl_compute_linetime_wm(struct intel_crtc_state *cstate) { + struct drm_atomic_state *state = cstate->base.state; + struct drm_i915_private *dev_priv = to_i915(state->dev); uint32_t pixel_rate; + uint32_t linetime_wm; if (!cstate->base.active) return 0; @@ -3771,8 +3774,14 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate) if (WARN_ON(pixel_rate == 0)) return 0; - return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000, - pixel_rate); + linetime_wm = DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * + 1000, pixel_rate); + + /* Display WA #1135: bxt. 
*/ + if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled) + linetime_wm = DIV_ROUND_UP(linetime_wm, 2); + + return linetime_wm; } static void skl_compute_transition_wm(struct intel_crtc_state *cstate, -- cgit v1.2.3 From 4b7b2331885a1ab348178f3faf9dc6a4dccae01a Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Thu, 1 Dec 2016 21:19:35 +0530 Subject: drm/i915/kbl: IPC workaround for kabylake Display Workarounds #1141 IPC (Isoch Priority Control) may cause underflows. KBL WA: When IPC is enabled, watermark latency values must be increased by 4us across all levels. This brings level 0 up to 6us. Changes since V1: - Add Workaround number in commit & code Changes since V2 (from Paulo): - Bikeshed the WA tag so it looks like the others Reviewed-by: Paulo Zanoni Signed-off-by: Mahesh Kumar Signed-off-by: Paulo Zanoni Link: http://patchwork.freedesktop.org/patch/msgid/20161201154940.24446-4-mahesh1.kumar@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 9ea3eeed3385..315a1b339257 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3596,6 +3596,10 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, fb->modifier == I915_FORMAT_MOD_Yf_TILED; x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED; + /* Display WA #1141: kbl. */ + if (IS_KABYLAKE(dev_priv) && dev_priv->ipc_enabled) + latency += 4; + if (apply_memory_bw_wa && x_tiled) latency += 15; -- cgit v1.2.3 From b95320bdf2d891011da9c5ec85dcb114483856a6 Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Thu, 1 Dec 2016 21:19:37 +0530 Subject: drm/i915/skl+: change WM calc to fixed point 16.16 This patch changes Watermak calculation to fixed point calculation. Problem with current calculation is during plane_blocks_per_line calculation we divide intermediate blocks with min_scanlines and takes floor of the result because of integer operation. hence we end-up assigning less blocks than required. Which leads to flickers. 
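For illustration (hypothetical numbers; the 16.16 helpers are introduced by this patch): with plane_bytes_per_line = 600 and y_min_scanlines = 4 the current integer math gives

	DIV_ROUND_UP(600 * 4, 512) / 4  ->  5 / 4  ->  1 block per line

so everything past the decimal point is lost, even though the real requirement is 600/512 ~= 1.17 blocks per line. Keeping the intermediate value in 16.16 fixed point preserves the fraction until the final round-up:

	plane_blocks_per_line = fixed_16_16_div_round_up(5, 4);
				/* 0x14000, i.e. 1.25 blocks per line */
	y_tile_minimum = mul_u32_fixed_16_16(4, plane_blocks_per_line);
				/* 5 blocks instead of 4 */

and the eventual fixed_16_16_to_u32_round_up() allocates enough blocks rather than one too few.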
Changes since V1: - Add fixed point data type as per Paulo's review Changes since V2: - use fixed_point instead of fp_16_16 Changes since V3: - rebase Changes since V4 (from Paulo): - My original renaming suggestion was misunderstood, so implement it - Simplify fixed_16_16_to_u32 implementation - Fix indentation Reviewed-by: Paulo Zanoni Signed-off-by: Mahesh Kumar Signed-off-by: Paulo Zanoni Link: http://patchwork.freedesktop.org/patch/msgid/20161201154940.24446-6-mahesh1.kumar@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 84 +++++++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_pm.c | 69 +++++++++++++++++++-------------- 2 files changed, 124 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4eea9255dc53..b9138cd75a39 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -119,6 +119,90 @@ bool __i915_inject_load_failure(const char *func, int line); #define i915_inject_load_failure() \ __i915_inject_load_failure(__func__, __LINE__) +typedef struct { + uint32_t val; +} uint_fixed_16_16_t; + +#define FP_16_16_MAX ({ \ + uint_fixed_16_16_t fp; \ + fp.val = UINT_MAX; \ + fp; \ +}) + +static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val) +{ + uint_fixed_16_16_t fp; + + WARN_ON(val >> 16); + + fp.val = val << 16; + return fp; +} + +static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp) +{ + return DIV_ROUND_UP(fp.val, 1 << 16); +} + +static inline uint32_t fixed_16_16_to_u32(uint_fixed_16_16_t fp) +{ + return fp.val >> 16; +} + +static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1, + uint_fixed_16_16_t min2) +{ + uint_fixed_16_16_t min; + + min.val = min(min1.val, min2.val); + return min; +} + +static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1, + uint_fixed_16_16_t max2) +{ + uint_fixed_16_16_t max; + + max.val = max(max1.val, max2.val); + return max; +} + +static inline uint_fixed_16_16_t fixed_16_16_div_round_up(uint32_t val, + uint32_t d) +{ + uint_fixed_16_16_t fp, res; + + fp = u32_to_fixed_16_16(val); + res.val = DIV_ROUND_UP(fp.val, d); + return res; +} + +static inline uint_fixed_16_16_t fixed_16_16_div_round_up_u64(uint32_t val, + uint32_t d) +{ + uint_fixed_16_16_t res; + uint64_t interm_val; + + interm_val = (uint64_t)val << 16; + interm_val = DIV_ROUND_UP_ULL(interm_val, d); + WARN_ON(interm_val >> 32); + res.val = (uint32_t) interm_val; + + return res; +} + +static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val, + uint_fixed_16_16_t mul) +{ + uint64_t intermediate_val; + uint_fixed_16_16_t fp; + + intermediate_val = (uint64_t) val * mul.val; + WARN_ON(intermediate_val >> 32); + fp.val = (uint32_t) intermediate_val; + return fp; +} + static inline const char *yesno(bool v) { return v ? "yes" : "no"; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 315a1b339257..9171431558a3 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3509,32 +3509,35 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, * should allow pixel_rate up to ~2 GHz which seems sufficient since max * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. 
*/ -static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency) +static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, + uint32_t latency) { - uint32_t wm_intermediate_val, ret; + uint32_t wm_intermediate_val; + uint_fixed_16_16_t ret; if (latency == 0) - return UINT_MAX; - - wm_intermediate_val = latency * pixel_rate * cpp / 512; - ret = DIV_ROUND_UP(wm_intermediate_val, 1000); + return FP_16_16_MAX; + wm_intermediate_val = latency * pixel_rate * cpp; + ret = fixed_16_16_div_round_up_u64(wm_intermediate_val, 1000 * 512); return ret; } -static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, - uint32_t latency, uint32_t plane_blocks_per_line) +static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate, + uint32_t pipe_htotal, + uint32_t latency, + uint_fixed_16_16_t plane_blocks_per_line) { - uint32_t ret; uint32_t wm_intermediate_val; + uint_fixed_16_16_t ret; if (latency == 0) - return UINT_MAX; + return FP_16_16_MAX; wm_intermediate_val = latency * pixel_rate; - ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) * - plane_blocks_per_line; - + wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val, + pipe_htotal * 1000); + ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line); return ret; } @@ -3574,14 +3577,17 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, struct drm_plane_state *pstate = &intel_pstate->base; struct drm_framebuffer *fb = pstate->fb; uint32_t latency = dev_priv->wm.skl_latency[level]; - uint32_t method1, method2; - uint32_t plane_bytes_per_line, plane_blocks_per_line; + uint_fixed_16_16_t method1, method2; + uint_fixed_16_16_t plane_blocks_per_line; + uint_fixed_16_16_t selected_result; + uint32_t interm_pbpl; + uint32_t plane_bytes_per_line; uint32_t res_blocks, res_lines; - uint32_t selected_result; uint8_t cpp; uint32_t width = 0, height = 0; uint32_t plane_pixel_rate; - uint32_t y_tile_minimum, y_min_scanlines; + uint_fixed_16_16_t y_tile_minimum; + uint32_t y_min_scanlines; struct intel_atomic_state *state = to_intel_atomic_state(cstate->base.state); bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); @@ -3640,14 +3646,16 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, plane_bytes_per_line = width * cpp; if (y_tiled) { + interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line * + y_min_scanlines, 512); plane_blocks_per_line = - DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512); - plane_blocks_per_line /= y_min_scanlines; + fixed_16_16_div_round_up(interm_pbpl, y_min_scanlines); } else if (x_tiled) { - plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); + interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512); + plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl); } else { - plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) - + 1; + interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; + plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl); } method1 = skl_wm_method1(plane_pixel_rate, cpp, latency); @@ -3656,26 +3664,29 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, latency, plane_blocks_per_line); - y_tile_minimum = plane_blocks_per_line * y_min_scanlines; + y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines, + plane_blocks_per_line); if (y_tiled) { - selected_result = max(method2, y_tile_minimum); + selected_result = max_fixed_16_16(method2, y_tile_minimum); } else { if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && 
(plane_bytes_per_line / 512 < 1)) selected_result = method2; - else if ((ddb_allocation / plane_blocks_per_line) >= 1) - selected_result = min(method1, method2); + else if ((ddb_allocation / + fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1) + selected_result = min_fixed_16_16(method1, method2); else selected_result = method1; } - res_blocks = selected_result + 1; - res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line); + res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1; + res_lines = DIV_ROUND_UP(selected_result.val, + plane_blocks_per_line.val); if (level >= 1 && level <= 7) { if (y_tiled) { - res_blocks += y_tile_minimum; + res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum); res_lines += y_min_scanlines; } else { res_blocks++; -- cgit v1.2.3 From ccc24b39a6fa3b0ba6b4310c10d7e0288ec8db10 Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Thu, 1 Dec 2016 21:19:38 +0530 Subject: drm/i915: Add intel_atomic_get_existing_crtc_state function This patch Adds a function to extract intel_crtc_state from the atomic_state, if not available it returns NULL. v2 (from Paulo): - Fix white space problem detected by checkpatch. Signed-off-by: Mahesh Kumar Reviewed-by: Paulo Zanoni Signed-off-by: Paulo Zanoni Link: http://patchwork.freedesktop.org/patch/msgid/20161201154940.24446-7-mahesh1.kumar@intel.com --- drivers/gpu/drm/i915/intel_drv.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index fd77a3ba857b..8f4ddca0f521 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1809,6 +1809,20 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state, return to_intel_crtc_state(crtc_state); } +static inline struct intel_crtc_state * +intel_atomic_get_existing_crtc_state(struct drm_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_crtc_state *crtc_state; + + crtc_state = drm_atomic_get_existing_crtc_state(state, &crtc->base); + + if (crtc_state) + return to_intel_crtc_state(crtc_state); + else + return NULL; +} + static inline struct intel_plane_state * intel_atomic_get_existing_plane_state(struct drm_atomic_state *state, struct intel_plane *plane) -- cgit v1.2.3 From 73f67aa8cc3c0ab7299f2fc81049a7d8d934fef8 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 7 Dec 2016 22:48:09 +0200 Subject: drm/i915: distinguish G33 and Pineview from each other MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pineview deserves to use its own platform enum (which was already added, unused, previously). IS_G33() no longer matches Pineview, and gets replaced by IS_G33() || IS_PINEVIEW() or equivalent. Pineview is no longer an outlier among platform definitions. 
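The conversion is mechanical: call sites that relied on IS_G33() also covering Pineview now spell out both platforms, as in this sketch of the pattern repeated throughout the diff below:

	- if (IS_G33(dev_priv))
	+ if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))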
Cc: Ville Syrjälä Cc: Chris Wilson Cc: Joonas Lahtinen Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/1481143689-19672-1-git-send-email-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 3 +-- drivers/gpu/drm/i915/i915_gem.c | 8 +++++--- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 5 +++-- drivers/gpu/drm/i915/i915_gem_stolen.c | 4 ++-- drivers/gpu/drm/i915/i915_pci.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 5 +++-- drivers/gpu/drm/i915/intel_sdvo.c | 2 +- drivers/gpu/drm/i915/intel_uncore.c | 2 +- 8 files changed, 17 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b9138cd75a39..1480e733312a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -772,7 +772,6 @@ struct intel_csr { #define DEV_INFO_FOR_EACH_FLAG(func) \ func(is_mobile); \ - func(is_pineview); \ func(is_lp); \ func(is_alpha_support); \ /* Keep has_* in alphabetical order */ \ @@ -2619,7 +2618,7 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv)) #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) -#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview) +#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_PINEVIEW) #define IS_G33(dev_priv) ((dev_priv)->info.platform == INTEL_G33) #define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046) #define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.platform == INTEL_IVYBRIDGE) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index dd1a34ac830f..36183945e61a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2084,7 +2084,8 @@ u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size, * Minimum alignment is 4k (GTT page size), but might be greater * if a fence register is needed for the object. 
*/ - if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) || + if (INTEL_GEN(dev_priv) >= 4 || + (!fenced && (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))) || tiling_mode == I915_TILING_NONE) return 4096; @@ -4498,8 +4499,9 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) dev_priv->num_fence_regs = 32; - else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) || - IS_I945GM(dev_priv) || IS_G33(dev_priv)) + else if (INTEL_INFO(dev_priv)->gen >= 4 || + IS_I945G(dev_priv) || IS_I945GM(dev_priv) || + IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) dev_priv->num_fence_regs = 16; else dev_priv->num_fence_regs = 8; diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index d006bcb69e91..09193cfb5d8b 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -512,8 +512,9 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) */ swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; - } else if (IS_MOBILE(dev_priv) || (IS_GEN3(dev_priv) && - !IS_G33(dev_priv))) { + } else if (IS_MOBILE(dev_priv) || + (IS_GEN3(dev_priv) && + !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv))) { uint32_t dcc; /* On 9xx chipsets, channel interleave by the CPU is diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index c81b22f238c3..efc0e748ef89 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -203,8 +203,8 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv) return 0; /* make sure we don't clobber the GTT if it's within stolen memory */ - if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) && - !IS_G4X(dev_priv)) { + if (INTEL_GEN(dev_priv) <= 4 && + !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) { struct { u32 start, end; } stolen[2] = { diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index f7ec6e944e09..93f50ef2a309 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -141,7 +141,7 @@ static const struct intel_device_info intel_g33_info = { static const struct intel_device_info intel_pineview_info = { GEN3_FEATURES, - .platform = INTEL_G33, .is_pineview = 1, .is_mobile = 1, + .platform = INTEL_PINEVIEW, .is_mobile = 1, .has_hotplug = 1, .has_overlay = 1, }; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a41082e2750e..9eaf1e5bdae9 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8180,7 +8180,8 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, else dpll |= DPLLB_MODE_DAC_SERIAL; - if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) { + if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || + IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { dpll |= (crtc_state->pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; } @@ -8893,7 +8894,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; pipe_config->dpll_hw_state.dpll_md = tmp; } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || - IS_G33(dev_priv)) { + IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { tmp = I915_READ(DPLL(crtc->pipe)); pipe_config->pixel_multiplier = ((tmp & SDVO_MULTIPLIER_MASK) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 
054acd974e2d..2ad13903a054 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1296,7 +1296,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, if (INTEL_GEN(dev_priv) >= 4) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || - IS_G33(dev_priv)) { + IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { /* done in crtc_mode_set as it lives inside the dpll register */ } else { sdvox |= (crtc_state->pixel_multiplier - 1) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 07779d0c71e6..2007b138af0e 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1812,7 +1812,7 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) return ironlake_do_reset; else if (IS_G4X(dev_priv)) return g4x_do_reset; - else if (IS_G33(dev_priv)) + else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) return g33_do_reset; else if (INTEL_INFO(dev_priv)->gen >= 3) return i915_do_reset; -- cgit v1.2.3 From 991914274bdde1e0de28cadc8eca4c736ec3fd07 Mon Sep 17 00:00:00 2001 From: Joonas Lahtinen Date: Wed, 7 Dec 2016 16:22:39 +0200 Subject: drm/i915: Catch non-existent registers in find_fw_domain Add WARN_ON to find_fw_domain to registers related to uninitialized hardware. v2: - Print the uninitialized domains and register (Chris) Cc: Imre Deak Cc: Wang Elaine Cc: Chris Wilson Signed-off-by: Joonas Lahtinen Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1481120559-17413-1-git-send-email-joonas.lahtinen@linux.intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 2007b138af0e..8fc5f29e79a8 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -625,7 +625,14 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset) dev_priv->uncore.fw_domains_table_entries, fw_range_cmp); - return entry ? entry->domains : 0; + if (!entry) + return 0; + + WARN(entry->domains & ~dev_priv->uncore.fw_domains, + "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n", + entry->domains & ~dev_priv->uncore.fw_domains, offset); + + return entry->domains; } static void -- cgit v1.2.3 From 4e8048f80aa0ab1e05cb4d866922811996f28da4 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 6 Dec 2016 10:50:20 +0000 Subject: drm/i915: Shrink pipe config checker Replace INTEL_ERR_OR_DBG_KMS macro with an intel_err_or_dbg_kms function to shrink the code and rodata strings. text data bss dec hex filename 1271480 41831 2016 1315327 1411ff i915.ko.0 1265160 41831 2016 1309007 13f94f i915.ko.2 Total of ~6 KiB saving across text and strings. v2: * Annotate the function for printf-style checking. * Rename to pipe_config_err. 
(Chris Wilson) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1481021420-5783-1-git-send-email-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/intel_display.c | 51 ++++++++++++++++++++++++------------ 1 file changed, 34 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9eaf1e5bdae9..c956f342fd19 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13210,6 +13210,31 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n, return false; } +static void __printf(3, 4) +pipe_config_err(bool adjust, const char *name, const char *format, ...) +{ + char *level; + unsigned int category; + struct va_format vaf; + va_list args; + + if (adjust) { + level = KERN_DEBUG; + category = DRM_UT_KMS; + } else { + level = KERN_ERR; + category = DRM_UT_NONE; + } + + va_start(args, format); + vaf.fmt = format; + vaf.va = &args; + + drm_printk(level, category, "mismatch in %s %pV", name, &vaf); + + va_end(args); +} + static bool intel_pipe_config_compare(struct drm_i915_private *dev_priv, struct intel_crtc_state *current_config, @@ -13218,17 +13243,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, { bool ret = true; -#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \ - do { \ - if (!adjust) \ - DRM_ERROR(fmt, ##__VA_ARGS__); \ - else \ - DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \ - } while (0) - #define PIPE_CONF_CHECK_X(name) \ if (current_config->name != pipe_config->name) { \ - INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ + pipe_config_err(adjust, __stringify(name), \ "(expected 0x%08x, found 0x%08x)\n", \ current_config->name, \ pipe_config->name); \ @@ -13237,7 +13254,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, #define PIPE_CONF_CHECK_I(name) \ if (current_config->name != pipe_config->name) { \ - INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ + pipe_config_err(adjust, __stringify(name), \ "(expected %i, found %i)\n", \ current_config->name, \ pipe_config->name); \ @@ -13246,7 +13263,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, #define PIPE_CONF_CHECK_P(name) \ if (current_config->name != pipe_config->name) { \ - INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ + pipe_config_err(adjust, __stringify(name), \ "(expected %p, found %p)\n", \ current_config->name, \ pipe_config->name); \ @@ -13257,7 +13274,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, if (!intel_compare_link_m_n(¤t_config->name, \ &pipe_config->name,\ adjust)) { \ - INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ + pipe_config_err(adjust, __stringify(name), \ "(expected tu %i gmch %i/%i link %i/%i, " \ "found tu %i, gmch %i/%i link %i/%i)\n", \ current_config->name.tu, \ @@ -13283,7 +13300,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, &pipe_config->name, adjust) && \ !intel_compare_link_m_n(¤t_config->alt_name, \ &pipe_config->name, adjust)) { \ - INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ + pipe_config_err(adjust, __stringify(name), \ "(expected tu %i gmch %i/%i link %i/%i, " \ "or tu %i gmch %i/%i link %i/%i, " \ "found tu %i, gmch %i/%i link %i/%i)\n", \ @@ -13307,8 +13324,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, #define PIPE_CONF_CHECK_FLAGS(name, mask) \ if ((current_config->name ^ pipe_config->name) & (mask)) { \ - INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \ - "(expected %i, found %i)\n", \ + 
pipe_config_err(adjust, __stringify(name), \ + "(%x) (expected %i, found %i)\n", \ + (mask), \ current_config->name & (mask), \ pipe_config->name & (mask)); \ ret = false; \ @@ -13316,7 +13334,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ - INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ + pipe_config_err(adjust, __stringify(name), \ "(expected %i, found %i)\n", \ current_config->name, \ pipe_config->name); \ @@ -13433,7 +13451,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, #undef PIPE_CONF_CHECK_FLAGS #undef PIPE_CONF_CHECK_CLOCK_FUZZY #undef PIPE_CONF_QUIRK -#undef INTEL_ERR_OR_DBG_KMS return ret; } -- cgit v1.2.3 From d82faafd81fe1cf2a11cbc94b05d9fd22e651d49 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 8 Dec 2016 11:28:47 +0100 Subject: drm: Update drm_device docs about embedding. It's supported now! Spotted while reviewing Chris' patch to add a release hook. Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161208102847.3063-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_drv.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index f74b7d06ec01..4ec61ac27477 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -323,9 +323,8 @@ void drm_minor_release(struct drm_minor *minor) * historical baggage. Hence use the reference counting provided by * drm_dev_ref() and drm_dev_unref() only carefully. * - * Also note that embedding of &drm_device is currently not (yet) supported (but - * it would be easy to add). Drivers can store driver-private data in the - * dev_priv field of &drm_device. + * It is recommended that drivers embed struct &drm_device into their own device + * structure, which is supported through drm_dev_init(). */ /** @@ -462,7 +461,11 @@ static void drm_fs_inode_free(struct inode *inode) * Note that for purely virtual devices @parent can be NULL. * * Drivers that do not want to allocate their own device struct - * embedding struct &drm_device can call drm_dev_alloc() instead. + * embedding struct &drm_device can call drm_dev_alloc() instead. For drivers + * that do embed struct &drm_device it must be placed first in the overall + * structure, and the overall structure must be allocated using kmalloc(): The + * drm core's release function unconditionally calls kfree() on the @dev pointer + * when the final reference is released. * * RETURNS: * 0 on success, or error code on failure. -- cgit v1.2.3 From 87a6752c43f28363b0fd330874dec20249292d2f Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 6 Dec 2016 19:04:13 +0000 Subject: drm/i915: Use DRM_DEV_ERROR in i915_driver_load Now that it is available we don't have to open code a similar error message ourselves. 
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1481051053-29783-1-git-send-email-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index d2cc28f6953d..6428588518aa 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1190,8 +1190,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) if (dev_priv) ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev); if (ret) { - dev_printk(KERN_ERR, &pdev->dev, - "[" DRM_NAME ":%s] allocation failed\n", __func__); + DRM_DEV_ERROR(&pdev->dev, "allocation failed\n"); kfree(dev_priv); return ret; } -- cgit v1.2.3 From 7da0e124af76c79927f0d7abcc58d58d0fd7072f Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 8 Dec 2016 14:45:24 +0100 Subject: drm/atomic: Use active instead of enable in wait_for_vblanks. When DPMS was introduced to atomic, vblanks only worked when the crtc was enabled and active. wait_for_vblanks were not converted to check for crtc_state->active, which may cause an attempt for vblank_get to fail. This is probably harmless, but convert from enable to active anyway. Signed-off-by: Maarten Lankhorst Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1481204729-9058-2-git-send-email-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/drm_atomic_helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 583f47f27b36..23767df72615 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1117,7 +1117,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, * vblank wait) in the ->enable boolean. */ old_crtc_state->enable = false; - if (!crtc->state->enable) + if (!crtc->state->active) continue; /* Legacy cursor ioctls are completely unsynced, and userspace -- cgit v1.2.3 From a0b8a1fe34430c3a82258e8cb45f5968bdf31afd Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 5 Dec 2016 18:27:37 +0200 Subject: drm/i915/gen9: Fix PCODE polling during CDCLK change notification MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 848496e5902833600f7992f4faa82dc1546051ba Author: Ville Syrjälä Date: Wed Jul 13 16:32:03 2016 +0300 drm/i915: Wait up to 3ms for the pcu to ack the cdclk change request on SKL increased the timeout to match the spec, but we still see a timeout on at least one SKL. A CDCLK change request following the failed one will succeed nevertheless. I could reproduce this problem easily by running kms_pipe_crc_basic in a loop. In all failure cases _wait_for() was pre-empted for >3ms and so in the worst case - when the pre-emption happened right after calculating timeout__ in _wait_for() - we called skl_cdclk_wait_for_pcu_ready() only once which failed and so _wait_for() timed out. As opposed to this the spec says to keep retrying the request for at most a 3ms period. To fix this send the first request explicitly to guarantee that there is 3ms between the first and last request. Though this matches the spec, I noticed that in rare cases this can still time out if we sent only a few requests (in the worst case 2) _and_ PCODE is busy for some reason even after a previous request and a 3ms delay. 
To work around this retry the polling with pre-emption disabled to maximize the number of requests. Also increase the timeout to 10ms to account for interrupts that could reduce the number of requests. With this change I couldn't trigger the problem. v2: - Use 1ms poll period instead of 10us. (Chris) v3: - Poll with pre-emption disabled to increase the number of request attempts. (Ville, Chris) - Factor out a helper to poll, it's also needed by the next patch. v4: - Pass reply_mask, reply to skl_pcode_request(), instead of assuming the reply is generic. (Ville) v5: - List the request specific timeout values as code comment. (Ville) v6: - Try the poll first with preemption enabled. - Add code comment about first request being queued by PCODE. (Art) - Add timeout_base_ms argument. (Ville) v7: - Clarify code comment about first queued request. (Chris) Cc: Ville Syrjälä Cc: Chris Wilson Cc: Art Runyan Cc: # v4.2- : 3b2c171 : drm/i915: Wait up to 3ms Cc: # v4.2- Fixes: 5d96d8afcfbb ("drm/i915/skl: Deinit/init the display at suspend/resume") Reference: https://bugs.freedesktop.org/show_bug.cgi?id=97929 Testcase: igt/kms_pipe_crc_basic/suspend-read-crc-pipe-B Signed-off-by: Imre Deak Reviewed-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/1480955258-26311-1-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/intel_display.c | 31 +++++---------- drivers/gpu/drm/i915/intel_pm.c | 75 ++++++++++++++++++++++++++++++++++++ 3 files changed, 87 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1480e733312a..288152af7d89 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3705,6 +3705,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val); +int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, + u32 reply_mask, u32 reply, int timeout_base_ms); /* intel_sideband.c */ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c956f342fd19..bc1af87789bc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6271,35 +6271,24 @@ skl_dpll0_disable(struct drm_i915_private *dev_priv) dev_priv->cdclk_pll.vco = 0; } -static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) -{ - int ret; - u32 val; - - /* inform PCU we want to change CDCLK */ - val = SKL_CDCLK_PREPARE_FOR_CHANGE; - mutex_lock(&dev_priv->rps.hw_lock); - ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val); - mutex_unlock(&dev_priv->rps.hw_lock); - - return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE); -} - -static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) -{ - return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0; -} - static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) { u32 freq_select, pcu_ack; + int ret; WARN_ON((cdclk == 24000) != (vco == 0)); DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); - if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) { - DRM_ERROR("failed to inform PCU about cdclk change\n"); + mutex_lock(&dev_priv->rps.hw_lock); + ret = skl_pcode_request(dev_priv, 
SKL_PCODE_CDCLK_CONTROL, + SKL_CDCLK_PREPARE_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, 3); + mutex_unlock(&dev_priv->rps.hw_lock); + if (ret) { + DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", + ret); return; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 9171431558a3..b48e1c1d6ecb 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7896,6 +7896,81 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, return 0; } +static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox, + u32 request, u32 reply_mask, u32 reply, + u32 *status) +{ + u32 val = request; + + *status = sandybridge_pcode_read(dev_priv, mbox, &val); + + return *status || ((val & reply_mask) == reply); +} + +/** + * skl_pcode_request - send PCODE request until acknowledgment + * @dev_priv: device private + * @mbox: PCODE mailbox ID the request is targeted for + * @request: request ID + * @reply_mask: mask used to check for request acknowledgment + * @reply: value used to check for request acknowledgment + * @timeout_base_ms: timeout for polling with preemption enabled + * + * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE + * reports an error or an overall timeout of @timeout_base_ms+10 ms expires. + * The request is acknowledged once the PCODE reply dword equals @reply after + * applying @reply_mask. Polling is first attempted with preemption enabled + * for @timeout_base_ms and if this times out for another 10 ms with + * preemption disabled. + * + * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some + * other error as reported by PCODE. + */ +int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, + u32 reply_mask, u32 reply, int timeout_base_ms) +{ + u32 status; + int ret; + + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + +#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \ + &status) + + /* + * Prime the PCODE by doing a request first. Normally it guarantees + * that a subsequent request, at most @timeout_base_ms later, succeeds. + * _wait_for() doesn't guarantee when its passed condition is evaluated + * first, so send the first request explicitly. + */ + if (COND) { + ret = 0; + goto out; + } + ret = _wait_for(COND, timeout_base_ms * 1000, 10); + if (!ret) + goto out; + + /* + * The above can time out if the number of requests was low (2 in the + * worst case) _and_ PCODE was busy for some reason even after a + * (queued) request and @timeout_base_ms delay. As a workaround retry + * the poll with preemption disabled to maximize the number of + * requests. Increase the timeout from @timeout_base_ms to 10ms to + * account for interrupts that could reduce the number of these + * requests. + */ + DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n"); + WARN_ON_ONCE(timeout_base_ms > 3); + preempt_disable(); + ret = wait_for_atomic(COND, 10); + preempt_enable(); + +out: + return ret ? 
ret : status; +#undef COND +} + static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) { /* -- cgit v1.2.3 From b3b8e99984a4eace91bc097e8f8cec71441cae16 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 5 Dec 2016 18:27:38 +0200 Subject: drm/i915/gen9: Fix PCODE polling during SAGV disabling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to the previous patch, it's possible atm that we call intel_do_sagv_disable() only once during the 1ms period and time out if that call fails. As opposed to this the spec says that we need to keep retrying this request for a 1ms duration, so let's do this similarly to the CDCLK change notification request. v4-5: - Rebased on the reply_mask, reply change. v6: - Remove w/s change. (Lyude) - Rebased on the timeout_base argument change. Cc: Lyude Cc: Ville Syrjälä Cc: Chris Wilson Fixes: 656d1b89e5ff ("drm/i915/skl: Add support for the SAGV, fix underrun hangs") Signed-off-by: Imre Deak Reviewed-by: Lyude (v4) Link: http://patchwork.freedesktop.org/patch/msgid/1480955258-26311-2-git-send-email-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 34 +++++++++------------------------- 1 file changed, 9 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b48e1c1d6ecb..06e55967f180 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2960,24 +2960,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv) return 0; } -static int -intel_do_sagv_disable(struct drm_i915_private *dev_priv) -{ - int ret; - uint32_t temp = GEN9_SAGV_DISABLE; - - ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL, - &temp); - if (ret) - return ret; - else - return temp & GEN9_SAGV_IS_DISABLED; -} - int intel_disable_sagv(struct drm_i915_private *dev_priv) { - int ret, result; + int ret; if (!intel_has_sagv(dev_priv)) return 0; @@ -2989,25 +2975,23 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->rps.hw_lock); /* bspec says to keep retrying for at least 1 ms */ - ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1); + ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL, + GEN9_SAGV_DISABLE, + GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, + 1); mutex_unlock(&dev_priv->rps.hw_lock); - if (ret == -ETIMEDOUT) { - DRM_ERROR("Request to disable SAGV timed out\n"); - return -ETIMEDOUT; - } - /* * Some skl systems, pre-release machines in particular, * don't actually have an SAGV. 
*/ - if (IS_SKYLAKE(dev_priv) && result == -ENXIO) { + if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; return 0; - } else if (result < 0) { - DRM_ERROR("Failed to disable the SAGV\n"); - return result; + } else if (ret < 0) { + DRM_ERROR("Failed to disable the SAGV (%d)\n", ret); + return ret; } dev_priv->sagv_status = I915_SAGV_DISABLED; -- cgit v1.2.3 From 16d98b31f807756269106f9a71b1a3dc0d19c629 Mon Sep 17 00:00:00 2001 From: Robert Bragg Date: Wed, 7 Dec 2016 21:40:33 +0000 Subject: drm/i915/perf: More documentation hooked to i915.rst This adds a 'Perf' section to i915.rst with the following sub sections: - Overview - Comparison with Core Perf - i915 Driver Entry Points - i915 Perf Stream - i915 Perf Observation Architecture Stream - All i915 Perf Internals v2: section headers in i915.rst (Daniel Vetter) missing symbol docs + other fixups (Matthew Auld) Signed-off-by: Robert Bragg Reviewed-by: Matthew Auld Cc: Daniel Vetter Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161207214033.3581-1-robert@sixbynine.org --- Documentation/gpu/i915.rst | 91 +++++++++ drivers/gpu/drm/i915/i915_drv.h | 151 +++++++++++--- drivers/gpu/drm/i915/i915_perf.c | 412 ++++++++++++++++++++++++++++++++++++--- 3 files changed, 598 insertions(+), 56 deletions(-) (limited to 'drivers') diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index 117d2ab7a5f7..3843ef688341 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -356,4 +356,95 @@ switch_mm .. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h :doc: switch_mm tracepoint +Perf +==== + +Overview +-------- +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :doc: i915 Perf Overview + +Comparison with Core Perf +------------------------- +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :doc: i915 Perf History and Comparison with Core Perf + +i915 Driver Entry Points +------------------------ + +This section covers the entrypoints exported outside of i915_perf.c to +integrate with drm/i915 and to handle the `DRM_I915_PERF_OPEN` ioctl. + +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_init +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_fini +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_register +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_unregister +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_open_ioctl +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_release + +i915 Perf Stream +---------------- + +This section covers the stream-semantics-agnostic structures and functions +for representing an i915 perf stream FD and associated file operations. + +.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h + :functions: i915_perf_stream +.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h + :functions: i915_perf_stream_ops + +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: read_properties_unlocked +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_open_ioctl_locked +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_destroy_locked +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_read +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_ioctl +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_enable_locked +.. 
kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_disable_locked +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_poll +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_perf_poll_locked + +i915 Perf Observation Architecture Stream +----------------------------------------- + +.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h + :functions: i915_oa_ops + +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_oa_stream_init +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_oa_read +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_oa_stream_enable +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_oa_stream_disable +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_oa_wait_unlocked +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :functions: i915_oa_poll_wait + +All i915 Perf Internals +----------------------- + +This section simply includes all currently documented i915 perf internals, in +no particular order, but may include some more minor utilities or platform +specific details than found in the more high-level sections. + +.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c + :internal: + .. WARNING: DOCPROC directive not supported: !Cdrivers/gpu/drm/i915/i915_irq.c diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 288152af7d89..40c55c9c9eb4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1930,89 +1930,186 @@ struct i915_oa_reg { struct i915_perf_stream; +/** + * struct i915_perf_stream_ops - the OPs to support a specific stream type + */ struct i915_perf_stream_ops { - /* Enables the collection of HW samples, either in response to - * I915_PERF_IOCTL_ENABLE or implicitly called when stream is - * opened without I915_PERF_FLAG_DISABLED. + /** + * @enable: Enables the collection of HW samples, either in response to + * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened + * without `I915_PERF_FLAG_DISABLED`. */ void (*enable)(struct i915_perf_stream *stream); - /* Disables the collection of HW samples, either in response to - * I915_PERF_IOCTL_DISABLE or implicitly called before - * destroying the stream. + /** + * @disable: Disables the collection of HW samples, either in response + * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying + * the stream. */ void (*disable)(struct i915_perf_stream *stream); - /* Call poll_wait, passing a wait queue that will be woken + /** + * @poll_wait: Call poll_wait, passing a wait queue that will be woken * once there is something ready to read() for the stream */ void (*poll_wait)(struct i915_perf_stream *stream, struct file *file, poll_table *wait); - /* For handling a blocking read, wait until there is something - * to ready to read() for the stream. E.g. wait on the same + /** + * @wait_unlocked: For handling a blocking read, wait until there is + * something to ready to read() for the stream. E.g. wait on the same * wait queue that would be passed to poll_wait(). */ int (*wait_unlocked)(struct i915_perf_stream *stream); - /* read - Copy buffered metrics as records to userspace - * @buf: the userspace, destination buffer - * @count: the number of bytes to copy, requested by userspace - * @offset: zero at the start of the read, updated as the read - * proceeds, it represents how many bytes have been - * copied so far and the buffer offset for copying the - * next record. 
+ /** + * @read: Copy buffered metrics as records to userspace + * **buf**: the userspace, destination buffer + * **count**: the number of bytes to copy, requested by userspace + * **offset**: zero at the start of the read, updated as the read + * proceeds, it represents how many bytes have been copied so far and + * the buffer offset for copying the next record. * - * Copy as many buffered i915 perf samples and records for - * this stream to userspace as will fit in the given buffer. + * Copy as many buffered i915 perf samples and records for this stream + * to userspace as will fit in the given buffer. * - * Only write complete records; returning -ENOSPC if there - * isn't room for a complete record. + * Only write complete records; returning -%ENOSPC if there isn't room + * for a complete record. * - * Return any error condition that results in a short read - * such as -ENOSPC or -EFAULT, even though these may be - * squashed before returning to userspace. + * Return any error condition that results in a short read such as + * -%ENOSPC or -%EFAULT, even though these may be squashed before + * returning to userspace. */ int (*read)(struct i915_perf_stream *stream, char __user *buf, size_t count, size_t *offset); - /* Cleanup any stream specific resources. + /** + * @destroy: Cleanup any stream specific resources. * * The stream will always be disabled before this is called. */ void (*destroy)(struct i915_perf_stream *stream); }; +/** + * struct i915_perf_stream - state for a single open stream FD + */ struct i915_perf_stream { + /** + * @dev_priv: i915 drm device + */ struct drm_i915_private *dev_priv; + /** + * @link: Links the stream into ``&drm_i915_private->streams`` + */ struct list_head link; + /** + * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*` + * properties given when opening a stream, representing the contents + * of a single sample as read() by userspace. + */ u32 sample_flags; + + /** + * @sample_size: Considering the configured contents of a sample + * combined with the required header size, this is the total size + * of a single sample record. + */ int sample_size; + /** + * @ctx: %NULL if measuring system-wide across all contexts or a + * specific context that is being monitored. + */ struct i915_gem_context *ctx; + + /** + * @enabled: Whether the stream is currently enabled, considering + * whether the stream was opened in a disabled state and based + * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls. + */ bool enabled; + /** + * @ops: The callbacks providing the implementation of this specific + * type of configured stream. + */ const struct i915_perf_stream_ops *ops; }; +/** + * struct i915_oa_ops - Gen specific implementation of an OA unit stream + */ struct i915_oa_ops { + /** + * @init_oa_buffer: Resets the head and tail pointers of the + * circular buffer for periodic OA reports. + * + * Called when first opening a stream for OA metrics, but also may be + * called in response to an OA buffer overflow or other error + * condition. + * + * Note it may be necessary to clear the full OA buffer here as part of + * maintaining the invariable that new reports must be written to + * zeroed memory for us to be able to reliable detect if an expected + * report has not yet landed in memory. 
(At least on Haswell the OA + * buffer tail pointer is not synchronized with reports being visible + * to the CPU) + */ void (*init_oa_buffer)(struct drm_i915_private *dev_priv); + + /** + * @enable_metric_set: Applies any MUX configuration to set up the + * Boolean and Custom (B/C) counters that are part of the counter + * reports being sampled. May apply system constraints such as + * disabling EU clock gating as required. + */ int (*enable_metric_set)(struct drm_i915_private *dev_priv); + + /** + * @disable_metric_set: Remove system constraints associated with using + * the OA unit. + */ void (*disable_metric_set)(struct drm_i915_private *dev_priv); + + /** + * @oa_enable: Enable periodic sampling + */ void (*oa_enable)(struct drm_i915_private *dev_priv); + + /** + * @oa_disable: Disable periodic sampling + */ void (*oa_disable)(struct drm_i915_private *dev_priv); - void (*update_oacontrol)(struct drm_i915_private *dev_priv); - void (*update_hw_ctx_id_locked)(struct drm_i915_private *dev_priv, - u32 ctx_id); + + /** + * @read: Copy data from the circular OA buffer into a given userspace + * buffer. + */ int (*read)(struct i915_perf_stream *stream, char __user *buf, size_t count, size_t *offset); + + /** + * @oa_buffer_is_empty: Check if OA buffer empty (false positives OK) + * + * This is either called via fops or the poll check hrtimer (atomic + * ctx) without any locks taken. + * + * It's safe to read OA config state here unlocked, assuming that this + * is only called while the stream is enabled, while the global OA + * configuration can't be modified. + * + * Efficiency is more important than avoiding some false positives + * here, which will be handled gracefully - likely resulting in an + * %EAGAIN error for userspace. + */ bool (*oa_buffer_is_empty)(struct drm_i915_private *dev_priv); }; diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 0c6e12428194..ae7bd0ed7b1a 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -26,7 +26,7 @@ /** - * DOC: i915 Perf, streaming API for GPU metrics + * DOC: i915 Perf Overview * * Gen graphics supports a large number of performance counters that can help * driver and application developers understand and optimize their use of the @@ -45,6 +45,10 @@ * privileges by default, unless changed via the dev.i915.perf_event_paranoid * sysctl option. * + */ + +/** + * DOC: i915 Perf History and Comparison with Core Perf * * The interface was initially inspired by the core Perf infrastructure but * some notable differences are: @@ -75,8 +79,8 @@ * gets copied from the GPU mapped buffers to userspace buffers. * * - * Some notes regarding Linux Perf: - * -------------------------------- + * Issues hit with first prototype based on Core Perf + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * The first prototype of this driver was based on the core perf * infrastructure, and while we did make that mostly work, with some changes to @@ -135,7 +139,7 @@ * for combining with the side-band raw reports it captures using * MI_REPORT_PERF_COUNT commands. 
* - * _ As a side note on perf's grouping feature; there was also some concern + * - As a side note on perf's grouping feature; there was also some concern * that using PERF_FORMAT_GROUP as a way to pack together counter values * would quite drastically inflate our sample sizes, which would likely * lower the effective sampling resolutions we could use when the available @@ -277,6 +281,20 @@ static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = { #define SAMPLE_OA_REPORT (1<<0) +/** + * struct perf_open_properties - for validated properties given to open a stream + * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags + * @single_context: Whether a single or all gpu contexts should be monitored + * @ctx_handle: A gem ctx handle for use with @single_context + * @metrics_set: An ID for an OA unit metric set advertised via sysfs + * @oa_format: An OA unit HW report format + * @oa_periodic: Whether to enable periodic OA unit sampling + * @oa_period_exponent: The OA unit sampling period is derived from this + * + * As read_properties_unlocked() enumerates and validates the properties given + * to open a stream of metrics the configuration is built up in the structure + * which starts out zero initialized. + */ struct perf_open_properties { u32 sample_flags; @@ -314,7 +332,19 @@ static bool gen7_oa_buffer_is_empty_fop_unlocked(struct drm_i915_private *dev_pr } /** - * Appends a status record to a userspace read() buffer. + * append_oa_status - Appends a status record to a userspace read() buffer. + * @stream: An i915-perf stream opened for OA metrics + * @buf: destination buffer given by userspace + * @count: the number of bytes userspace wants to read + * @offset: (inout): the current position for writing into @buf + * @type: The kind of status to report to userspace + * + * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`) + * into the userspace read() buffer. + * + * The @buf @offset will only be updated on success. + * + * Returns: 0 on success, negative error code on failure. */ static int append_oa_status(struct i915_perf_stream *stream, char __user *buf, @@ -336,7 +366,21 @@ static int append_oa_status(struct i915_perf_stream *stream, } /** - * Copies single OA report into userspace read() buffer. + * append_oa_sample - Copies single OA report into userspace read() buffer. + * @stream: An i915-perf stream opened for OA metrics + * @buf: destination buffer given by userspace + * @count: the number of bytes userspace wants to read + * @offset: (inout): the current position for writing into @buf + * @report: A single OA report to (optionally) include as part of the sample + * + * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*` + * properties when opening a stream, tracked as `stream->sample_flags`. This + * function copies the requested components of a single sample to the given + * read() @buf. + * + * The @buf @offset will only be updated on success. + * + * Returns: 0 on success, negative error code on failure. */ static int append_oa_sample(struct i915_perf_stream *stream, char __user *buf, @@ -380,10 +424,8 @@ static int append_oa_sample(struct i915_perf_stream *stream, * @head_ptr: (inout): the current oa buffer cpu read position * @tail: the current oa buffer gpu write position * - * Returns 0 on success, negative error code on failure. 
- * - * Notably any error condition resulting in a short read (-ENOSPC or - * -EFAULT) will be returned even though one or more records may + * Notably any error condition resulting in a short read (-%ENOSPC or + * -%EFAULT) will be returned even though one or more records may * have been successfully copied. In this case it's up to the caller * to decide if the error should be squashed before returning to * userspace. @@ -392,6 +434,8 @@ static int append_oa_sample(struct i915_perf_stream *stream, * tail, so the head chases the tail?... If you think that's mad * and back-to-front you're not alone, but this follows the * Gen PRM naming convention. + * + * Returns: 0 on success, negative error code on failure. */ static int gen7_append_oa_reports(struct i915_perf_stream *stream, char __user *buf, @@ -496,6 +540,22 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, return ret; } +/** + * gen7_oa_read - copy status records then buffered OA reports + * @stream: An i915-perf stream opened for OA metrics + * @buf: destination buffer given by userspace + * @count: the number of bytes userspace wants to read + * @offset: (inout): the current position for writing into @buf + * + * Checks Gen 7 specific OA unit status registers and if necessary appends + * corresponding status records for userspace (such as for a buffer full + * condition) and then initiate appending any buffered OA reports. + * + * Updates @offset according to the number of bytes successfully copied into + * the userspace buffer. + * + * Returns: zero on success or a negative error code + */ static int gen7_oa_read(struct i915_perf_stream *stream, char __user *buf, size_t count, @@ -597,6 +657,20 @@ static int gen7_oa_read(struct i915_perf_stream *stream, return ret; } +/** + * i915_oa_wait_unlocked - handles blocking IO until OA data available + * @stream: An i915-perf stream opened for OA metrics + * + * Called when userspace tries to read() from a blocking stream FD opened + * for OA metrics. It waits until the hrtimer callback finds a non-empty + * OA buffer and wakes us. + * + * Note: it's acceptable to have this return with some false positives + * since any subsequent read handling will return -EAGAIN if there isn't + * really data ready for userspace yet. + * + * Returns: zero on success or a negative error code + */ static int i915_oa_wait_unlocked(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; @@ -615,6 +689,16 @@ static int i915_oa_wait_unlocked(struct i915_perf_stream *stream) !dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv)); } +/** + * i915_oa_poll_wait - call poll_wait() for an OA stream poll() + * @stream: An i915-perf stream opened for OA metrics + * @file: An i915 perf stream file + * @wait: poll() state table + * + * For handling userspace polling on an i915 perf stream opened for OA metrics, + * this starts a poll_wait with the wait queue that our hrtimer callback wakes + * when it sees data ready to read in the circular OA buffer. 
+ */ static void i915_oa_poll_wait(struct i915_perf_stream *stream, struct file *file, poll_table *wait) @@ -624,6 +708,18 @@ static void i915_oa_poll_wait(struct i915_perf_stream *stream, poll_wait(file, &dev_priv->perf.oa.poll_wq, wait); } +/** + * i915_oa_read - just calls through to &i915_oa_ops->read + * @stream: An i915-perf stream opened for OA metrics + * @buf: destination buffer given by userspace + * @count: the number of bytes userspace wants to read + * @offset: (inout): the current position for writing into @buf + * + * Updates @offset according to the number of bytes successfully copied into + * the userspace buffer. + * + * Returns: zero on success or a negative error code + */ static int i915_oa_read(struct i915_perf_stream *stream, char __user *buf, size_t count, @@ -634,9 +730,15 @@ static int i915_oa_read(struct i915_perf_stream *stream, return dev_priv->perf.oa.ops.read(stream, buf, count, offset); } -/* Determine the render context hw id, and ensure it remains fixed for the +/** + * oa_get_render_ctx_id - determine and hold ctx hw id + * @stream: An i915-perf stream opened for OA metrics + * + * Determine the render context hw id, and ensure it remains fixed for the * lifetime of the stream. This ensures that we don't have to worry about * updating the context ID in OACONTROL on the fly. + * + * Returns: zero on success or a negative error code */ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) { @@ -673,6 +775,13 @@ unlock: return ret; } +/** + * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold + * @stream: An i915-perf stream opened for OA metrics + * + * In case anything needed doing to ensure the context HW ID would remain valid + * for the lifetime of the stream, then that can be undone here. + */ static void oa_put_render_ctx_id(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; @@ -945,6 +1054,15 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv) spin_unlock_irqrestore(&dev_priv->perf.hook_lock, flags); } +/** + * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream + * @stream: An i915 perf stream opened for OA metrics + * + * [Re]enables hardware periodic sampling according to the period configured + * when opening the stream. This also starts a hrtimer that will periodically + * check for data in the circular OA buffer for notifying userspace (e.g. + * during a read() or poll()). + */ static void i915_oa_stream_enable(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; @@ -962,6 +1080,14 @@ static void gen7_oa_disable(struct drm_i915_private *dev_priv) I915_WRITE(GEN7_OACONTROL, 0); } +/** + * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream + * @stream: An i915 perf stream opened for OA metrics + * + * Stops the OA unit from periodically writing counter reports into the + * circular OA buffer. This also stops the hrtimer that periodically checks for + * data in the circular OA buffer, for notifying userspace. 
+ */ static void i915_oa_stream_disable(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; @@ -987,6 +1113,24 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = { .read = i915_oa_read, }; +/** + * i915_oa_stream_init - validate combined props for OA stream and init + * @stream: An i915 perf stream + * @param: The open parameters passed to `DRM_I915_PERF_OPEN` + * @props: The property state that configures stream (individually validated) + * + * While read_properties_unlocked() validates properties in isolation it + * doesn't ensure that the combination necessarily makes sense. + * + * At this point it has been determined that userspace wants a stream of + * OA metrics, but still we need to further validate the combined + * properties are OK. + * + * If the configuration makes sense then we can allocate memory for + * a circular OA buffer and apply the requested metric set configuration. + * + * Returns: zero on success or a negative error code. + */ static int i915_oa_stream_init(struct i915_perf_stream *stream, struct drm_i915_perf_open_param *param, struct perf_open_properties *props) @@ -1111,6 +1255,31 @@ err_oa_buf_alloc: return ret; } +/** + * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation + * @stream: An i915 perf stream + * @file: An i915 perf stream file + * @buf: destination buffer given by userspace + * @count: the number of bytes userspace wants to read + * @ppos: (inout) file seek position (unused) + * + * Besides wrapping &i915_perf_stream_ops->read this provides a common place to + * ensure that if we've successfully copied any data then reporting that takes + * precedence over any internal error status, so the data isn't lost. + * + * For example ret will be -ENOSPC whenever there is more buffered data than + * can be copied to userspace, but that's only interesting if we weren't able + * to copy some data because it implies the userspace buffer is too small to + * receive a single record (and we never split records). + * + * Another case with ret == -EFAULT is more of a grey area since it would seem + * like bad form for userspace to ask us to overrun its buffer, but the user + * knows best: + * + * http://yarchive.net/comp/linux/partial_reads_writes.html + * + * Returns: The number of bytes copied or a negative error code on failure. + */ static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream, struct file *file, char __user *buf, @@ -1126,25 +1295,27 @@ static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream, size_t offset = 0; int ret = stream->ops->read(stream, buf, count, &offset); - /* If we've successfully copied any data then reporting that - * takes precedence over any internal error status, so the - * data isn't lost. - * - * For example ret will be -ENOSPC whenever there is more - * buffered data than can be copied to userspace, but that's - * only interesting if we weren't able to copy some data - * because it implies the userspace buffer is too small to - * receive a single record (and we never split records). 
- * - * Another case with ret == -EFAULT is more of a grey area - * since it would seem like bad form for userspace to ask us - * to overrun its buffer, but the user knows best: - * - * http://yarchive.net/comp/linux/partial_reads_writes.html - */ return offset ?: (ret ?: -EAGAIN); } +/** + * i915_perf_read - handles read() FOP for i915 perf stream FDs + * @file: An i915 perf stream file + * @buf: destination buffer given by userspace + * @count: the number of bytes userspace wants to read + * @ppos: (inout) file seek position (unused) + * + * The entry point for handling a read() on a stream file descriptor from + * userspace. Most of the work is left to the i915_perf_read_locked() and + * &i915_perf_stream_ops->read but to save having stream implementations (of + * which we might have multiple later) we handle blocking read here. + * + * We can also consistently treat trying to read from a disabled stream + * as an IO error so implementations can assume the stream is enabled + * while reading. + * + * Returns: The number of bytes copied or a negative error code on failure. + */ static ssize_t i915_perf_read(struct file *file, char __user *buf, size_t count, @@ -1211,6 +1382,22 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) return HRTIMER_RESTART; } +/** + * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream + * @dev_priv: i915 device instance + * @stream: An i915 perf stream + * @file: An i915 perf stream file + * @wait: poll() state table + * + * For handling userspace polling on an i915 perf stream, this calls through to + * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that + * will be woken for new stream data. + * + * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize + * with any non-file-operation driver hooks. + * + * Returns: any poll events that are ready without sleeping + */ static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv, struct i915_perf_stream *stream, struct file *file, @@ -1232,6 +1419,19 @@ static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv, return events; } +/** + * i915_perf_poll - call poll_wait() with a suitable wait queue for stream + * @file: An i915 perf stream file + * @wait: poll() state table + * + * For handling userspace polling on an i915 perf stream, this ensures + * poll_wait() gets called with a wait queue that will be woken for new stream + * data. + * + * Note: Implementation deferred to i915_perf_poll_locked() + * + * Returns: any poll events that are ready without sleeping + */ static unsigned int i915_perf_poll(struct file *file, poll_table *wait) { struct i915_perf_stream *stream = file->private_data; @@ -1245,6 +1445,16 @@ static unsigned int i915_perf_poll(struct file *file, poll_table *wait) return ret; } +/** + * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl + * @stream: A disabled i915 perf stream + * + * [Re]enables the associated capture of data for this stream. + * + * If a stream was previously enabled then there's currently no intention + * to provide userspace any guarantee about the preservation of previously + * buffered data. 
+ */ static void i915_perf_enable_locked(struct i915_perf_stream *stream) { if (stream->enabled) @@ -1257,6 +1467,20 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream) stream->ops->enable(stream); } +/** + * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl + * @stream: An enabled i915 perf stream + * + * Disables the associated capture of data for this stream. + * + * The intention is that disabling an re-enabling a stream will ideally be + * cheaper than destroying and re-opening a stream with the same configuration, + * though there are no formal guarantees about what state or buffered data + * must be retained between disabling and re-enabling a stream. + * + * Note: while a stream is disabled it's considered an error for userspace + * to attempt to read from the stream (-EIO). + */ static void i915_perf_disable_locked(struct i915_perf_stream *stream) { if (!stream->enabled) @@ -1269,6 +1493,18 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream) stream->ops->disable(stream); } +/** + * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs + * @stream: An i915 perf stream + * @cmd: the ioctl request + * @arg: the ioctl data + * + * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize + * with any non-file-operation driver hooks. + * + * Returns: zero on success or a negative error code. Returns -EINVAL for + * an unknown ioctl request. + */ static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, unsigned int cmd, unsigned long arg) @@ -1285,6 +1521,17 @@ static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, return -EINVAL; } +/** + * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs + * @file: An i915 perf stream file + * @cmd: the ioctl request + * @arg: the ioctl data + * + * Implementation deferred to i915_perf_ioctl_locked(). + * + * Returns: zero on success or a negative error code. Returns -EINVAL for + * an unknown ioctl request. + */ static long i915_perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) @@ -1300,6 +1547,16 @@ static long i915_perf_ioctl(struct file *file, return ret; } +/** + * i915_perf_destroy_locked - destroy an i915 perf stream + * @stream: An i915 perf stream + * + * Frees all resources associated with the given i915 perf @stream, disabling + * any associated data capture in the process. + * + * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize + * with any non-file-operation driver hooks. + */ static void i915_perf_destroy_locked(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; @@ -1321,6 +1578,17 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream) kfree(stream); } +/** + * i915_perf_release - handles userspace close() of a stream file + * @inode: anonymous inode associated with file + * @file: An i915 perf stream file + * + * Cleans up any resources associated with an open i915 perf stream file. + * + * NB: close() can't really fail from the userspace point of view. + * + * Returns: zero on success or a negative error code. 
+ */ static int i915_perf_release(struct inode *inode, struct file *file) { struct i915_perf_stream *stream = file->private_data; @@ -1365,6 +1633,30 @@ lookup_context(struct drm_i915_private *dev_priv, return ctx; } +/** + * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD + * @dev_priv: i915 device instance + * @param: The open parameters passed to 'DRM_I915_PERF_OPEN` + * @props: individually validated u64 property value pairs + * @file: drm file + * + * See i915_perf_ioctl_open() for interface details. + * + * Implements further stream config validation and stream initialization on + * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex + * taken to serialize with any non-file-operation driver hooks. + * + * Note: at this point the @props have only been validated in isolation and + * it's still necessary to validate that the combination of properties makes + * sense. + * + * In the case where userspace is interested in OA unit metrics then further + * config validation and stream initialization details will be handled by + * i915_oa_stream_init(). The code here should only validate config state that + * will be relevant to all stream types / backends. + * + * Returns: zero on success or a negative error code. + */ static int i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, struct drm_i915_perf_open_param *param, @@ -1459,12 +1751,20 @@ err: return ret; } -/* Note we copy the properties from userspace outside of the i915 perf - * mutex to avoid an awkward lockdep with mmap_sem. +/** + * read_properties_unlocked - validate + copy userspace stream open properties + * @dev_priv: i915 device instance + * @uprops: The array of u64 key value pairs given by userspace + * @n_props: The number of key value pairs expected in @uprops + * @props: The stream configuration built up while validating properties * * Note this function only validates properties in isolation it doesn't * validate that the combination of properties makes sense or that all * properties necessary for a particular kind of stream have been set. + * + * Note that there currently aren't any ordering requirements for properties so + * we shouldn't validate or assume anything about ordering here. This doesn't + * rule out defining new properties with ordering requirements in the future. */ static int read_properties_unlocked(struct drm_i915_private *dev_priv, u64 __user *uprops, @@ -1586,6 +1886,30 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, return 0; } +/** + * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD + * @dev: drm device + * @data: ioctl data copied from userspace (unvalidated) + * @file: drm file + * + * Validates the stream open parameters given by userspace including flags + * and an array of u64 key, value pair properties. + * + * Very little is assumed up front about the nature of the stream being + * opened (for instance we don't assume it's for periodic OA unit metrics). An + * i915-perf stream is expected to be a suitable interface for other forms of + * buffered data written by the GPU besides periodic OA metrics. + * + * Note we copy the properties from userspace outside of the i915 perf + * mutex to avoid an awkward lockdep with mmap_sem. + * + * Most of the implementation details are handled by + * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock + * mutex for serializing with any non-file-operation driver hooks. 
+ * + * Return: A newly opened i915 Perf stream file descriptor or negative + * error code on failure. + */ int i915_perf_open_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { @@ -1622,6 +1946,14 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data, return ret; } +/** + * i915_perf_register - exposes i915-perf to userspace + * @dev_priv: i915 device instance + * + * In particular OA metric sets are advertised under a sysfs metrics/ + * directory allowing userspace to enumerate valid IDs that can be + * used to open an i915-perf stream. + */ void i915_perf_register(struct drm_i915_private *dev_priv) { if (!IS_HASWELL(dev_priv)) @@ -1651,6 +1983,15 @@ exit: mutex_unlock(&dev_priv->perf.lock); } +/** + * i915_perf_unregister - hide i915-perf from userspace + * @dev_priv: i915 device instance + * + * i915-perf state cleanup is split up into an 'unregister' and + * 'deinit' phase where the interface is first hidden from + * userspace by i915_perf_unregister() before cleaning up + * remaining state in i915_perf_fini(). + */ void i915_perf_unregister(struct drm_i915_private *dev_priv) { if (!IS_HASWELL(dev_priv)) @@ -1707,6 +2048,15 @@ static struct ctl_table dev_root[] = { {} }; +/** + * i915_perf_init - initialize i915-perf state on module load + * @dev_priv: i915 device instance + * + * Initializes i915-perf state without exposing anything to userspace. + * + * Note: i915-perf initialization is split into an 'init' and 'register' + * phase with i915_perf_register() exposing state to userspace. + */ void i915_perf_init(struct drm_i915_private *dev_priv) { if (!IS_HASWELL(dev_priv)) @@ -1742,6 +2092,10 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.initialized = true; } +/** + * i915_perf_fini - Counterpart to i915_perf_init() + * @dev_priv: i915 device instance + */ void i915_perf_fini(struct drm_i915_private *dev_priv) { if (!dev_priv->perf.initialized) -- cgit v1.2.3 From 7e3eb599236db605413b4e047692dab9b600d6c6 Mon Sep 17 00:00:00 2001 From: "Nagaraju, Vathsala" Date: Fri, 9 Dec 2016 23:42:09 +0530 Subject: drm/i915/psr: report psr2 hw enabled from psr2_ctl For PSR2, as per the spec, bit 31 of PSR2_CTL must be set; for PSR1, bit 31 of SRD_CTL must be set. Reporting the "HW Enabled & Active bit" status for PSR2 from SRD_CTL therefore gives the wrong status.
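In code, the check this implies is simply selecting the register by psr2_support before testing the enable bit (a restatement of the debugfs hunk below, using the driver's existing I915_READ() helper and the EDP_PSR_CTL/EDP_PSR2_CTL enable bits):

  if (dev_priv->psr.psr2_support)
          enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
  else
          enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;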
Cc: Rodrigo Vivi Cc: Jim Bride Signed-off-by: vathsala nagaraju Reviewed-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Link: http://patchwork.freedesktop.org/patch/msgid/1481307129-29354-1-git-send-email-vathsala.nagaraju@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a746130c8e32..54e196d9d83e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2567,9 +2567,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) seq_printf(m, "Re-enable work scheduled: %s\n", yesno(work_busy(&dev_priv->psr.work.work))); - if (HAS_DDI(dev_priv)) - enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; - else { + if (HAS_DDI(dev_priv)) { + if (dev_priv->psr.psr2_support) + enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; + else + enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; + } else { for_each_pipe(dev_priv, pipe) { enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, pipe); -- cgit v1.2.3 From 697cc9c8a2c39a42188840e35e212230a240f9e4 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 11 Dec 2016 20:20:19 +0100 Subject: drm: Simplify GETRESOURCES ioctl Looping twice when we can do it once is silly. Also use a consistent style. Note that there's a good race with the connector list walking, since that is no longer protected by mode_config.mutex. But that's for a later patch to fix. v2: Actually try to not blow up, somehow I lost the hunk that checks we don't copy too much. Noticed by Chris. v3: - squash all drm_mode_getresources cleanups into one - use consistent style for walking objects (Chris) v4: - Use u64_to_user_ptr (Chris) - Don't forget to copy the last connector (Chris) v5: Chris was right ... 
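For context on the copy-capping semantics preserved here: userspace (e.g. libdrm's drmModeGetResources()) normally issues this ioctl twice, first with zeroed counts to learn the array sizes and then again with the arrays allocated, so the kernel must copy at most the number of entries userspace asked for while always reporting the real totals. A minimal userspace sketch of that pattern, shown for the connector array only (hypothetical helper, assumes the kernel uAPI headers, error paths trimmed):

  #include <stdint.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>
  #include <drm/drm.h>

  static int get_connector_ids(int fd, uint32_t **ids, uint32_t *count)
  {
          struct drm_mode_card_res res = {};
          uint32_t alloc;

          if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))      /* 1st: sizes */
                  return -1;

          alloc = res.count_connectors;
          *ids = calloc(alloc, sizeof(**ids));
          if (alloc && !*ids)
                  return -1;
          res.connector_id_ptr = (uintptr_t)*ids;
          /* zero the counts we did not allocate arrays for, so the kernel
           * skips copying those IDs on the second pass */
          res.count_fbs = res.count_crtcs = res.count_encoders = 0;

          if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))      /* 2nd: IDs */
                  return -1;

          /* a connector may have been hot-added in between: only the first
           * 'alloc' IDs were copied, and the larger total is a hint to retry */
          *count = res.count_connectors < alloc ? res.count_connectors : alloc;
          return 0;
  }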
Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161211192019.29603-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_mode_config.c | 111 ++++++++++++++------------------------ 1 file changed, 39 insertions(+), 72 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 2735a5847ffa..b1e8bbceaf39 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -84,17 +84,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_card_res *card_res = data; - struct list_head *lh; struct drm_framebuffer *fb; struct drm_connector *connector; struct drm_crtc *crtc; struct drm_encoder *encoder; - int ret = 0; - int connector_count = 0; - int crtc_count = 0; - int fb_count = 0; - int encoder_count = 0; - int copied = 0; + int count, ret = 0; uint32_t __user *fb_id; uint32_t __user *crtc_id; uint32_t __user *connector_id; @@ -105,89 +99,62 @@ int drm_mode_getresources(struct drm_device *dev, void *data, mutex_lock(&file_priv->fbs_lock); - /* - * For the non-control nodes we need to limit the list of resources - * by IDs in the group list for this node - */ - list_for_each(lh, &file_priv->fbs) - fb_count++; - - /* handle this in 4 parts */ - /* FBs */ - if (card_res->count_fbs >= fb_count) { - copied = 0; - fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr; - list_for_each_entry(fb, &file_priv->fbs, filp_head) { - if (put_user(fb->base.id, fb_id + copied)) { - mutex_unlock(&file_priv->fbs_lock); - return -EFAULT; - } - copied++; + count = 0; + fb_id = u64_to_user_ptr(card_res->fb_id_ptr); + list_for_each_entry(fb, &file_priv->fbs, filp_head) { + if (count < card_res->count_fbs && + put_user(fb->base.id, fb_id + count)) { + mutex_unlock(&file_priv->fbs_lock); + return -EFAULT; } + count++; } - card_res->count_fbs = fb_count; + card_res->count_fbs = count; mutex_unlock(&file_priv->fbs_lock); /* mode_config.mutex protects the connector list against e.g. DP MST * connector hot-adding. CRTC/Plane lists are invariant. 
*/ mutex_lock(&dev->mode_config.mutex); - drm_for_each_crtc(crtc, dev) - crtc_count++; - - drm_for_each_connector(connector, dev) - connector_count++; - - drm_for_each_encoder(encoder, dev) - encoder_count++; - card_res->max_height = dev->mode_config.max_height; card_res->min_height = dev->mode_config.min_height; card_res->max_width = dev->mode_config.max_width; card_res->min_width = dev->mode_config.min_width; - /* CRTCs */ - if (card_res->count_crtcs >= crtc_count) { - copied = 0; - crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; - drm_for_each_crtc(crtc, dev) { - if (put_user(crtc->base.id, crtc_id + copied)) { - ret = -EFAULT; - goto out; - } - copied++; + count = 0; + crtc_id = u64_to_user_ptr(card_res->crtc_id_ptr); + drm_for_each_crtc(crtc, dev) { + if (count < card_res->count_crtcs && + put_user(crtc->base.id, crtc_id + count)) { + ret = -EFAULT; + goto out; } + count++; } - card_res->count_crtcs = crtc_count; - - /* Encoders */ - if (card_res->count_encoders >= encoder_count) { - copied = 0; - encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr; - drm_for_each_encoder(encoder, dev) { - if (put_user(encoder->base.id, encoder_id + - copied)) { - ret = -EFAULT; - goto out; - } - copied++; + card_res->count_crtcs = count; + + count = 0; + encoder_id = u64_to_user_ptr(card_res->encoder_id_ptr); + drm_for_each_encoder(encoder, dev) { + if (count < card_res->count_encoders && + put_user(encoder->base.id, encoder_id + count)) { + ret = -EFAULT; + goto out; } + count++; } - card_res->count_encoders = encoder_count; - - /* Connectors */ - if (card_res->count_connectors >= connector_count) { - copied = 0; - connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr; - drm_for_each_connector(connector, dev) { - if (put_user(connector->base.id, - connector_id + copied)) { - ret = -EFAULT; - goto out; - } - copied++; + card_res->count_encoders = count; + + count = 0; + connector_id = u64_to_user_ptr(card_res->connector_id_ptr); + drm_for_each_connector(connector, dev) { + if (count < card_res->count_connectors && + put_user(connector->base.id, connector_id + count)) { + ret = -EFAULT; + goto out; } + count++; } - card_res->count_connectors = connector_count; + card_res->count_connectors = count; out: mutex_unlock(&dev->mode_config.mutex); -- cgit v1.2.3 From dbb4351bab0a8440f6b02895c142bce6c30b7097 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 7 Dec 2016 13:34:11 +0000 Subject: drm/i915: Reorder phys backing storage release MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit a4f5ea64f0a8 ("drm/i915: Refactor object page API"), I reordered the object->pages teardown to be more friendly wrt to a separate obj->mm.lock. However, I overlooked the phys object and left it with a dangling use-after-free of its phys_handle. Move the allocation of the phys handle to get_pages and it release to put_pages to prevent the invalid access and to improve symmetry. v2: Add commentary about always aligning to page size. 
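In outline, the fix ties the lifetime of the contiguous allocation to the object's pages: the phys handle is created in get_pages and freed only in put_pages, so release_phys no longer touches it at all. A simplified sketch of that pairing (illustrative fragment only; the real paths below also copy data between the shmem pages and the contiguous buffer and build the sg_table):

  /* get_pages side: allocate and publish the handle */
  phys = drm_pci_alloc(obj->base.dev, obj->base.size,
                       roundup_pow_of_two(obj->base.size));
  if (!phys)
          return ERR_PTR(-ENOMEM);
  obj->phys_handle = phys;

  /* put_pages side: the one and only release point */
  drm_pci_free(obj->base.dev, obj->phys_handle);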
Testcase: igt/drv_selftest/objects Reported-by: Ville Syrjälä Fixes: a4f5ea64f0a8 ("drm/i915: Refactor object page API") Signed-off-by: Chris Wilson Cc: Ville Syrjälä Cc: Tvrtko Ursulin Cc: Joonas Lahtinen Reviewed-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20161207133411.8028-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 53 ++++++++++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 36183945e61a..f86a71d9fe37 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -176,21 +176,35 @@ static struct sg_table * i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) { struct address_space *mapping = obj->base.filp->f_mapping; - char *vaddr = obj->phys_handle->vaddr; + drm_dma_handle_t *phys; struct sg_table *st; struct scatterlist *sg; + char *vaddr; int i; if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) return ERR_PTR(-EINVAL); + /* Always aligning to the object size, allows a single allocation + * to handle all possible callers, and given typical object sizes, + * the alignment of the buddy allocation will naturally match. + */ + phys = drm_pci_alloc(obj->base.dev, + obj->base.size, + roundup_pow_of_two(obj->base.size)); + if (!phys) + return ERR_PTR(-ENOMEM); + + vaddr = phys->vaddr; for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { struct page *page; char *src; page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) - return ERR_CAST(page); + if (IS_ERR(page)) { + st = ERR_CAST(page); + goto err_phys; + } src = kmap_atomic(page); memcpy(vaddr, src, PAGE_SIZE); @@ -204,21 +218,29 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) i915_gem_chipset_flush(to_i915(obj->base.dev)); st = kmalloc(sizeof(*st), GFP_KERNEL); - if (st == NULL) - return ERR_PTR(-ENOMEM); + if (!st) { + st = ERR_PTR(-ENOMEM); + goto err_phys; + } if (sg_alloc_table(st, 1, GFP_KERNEL)) { kfree(st); - return ERR_PTR(-ENOMEM); + st = ERR_PTR(-ENOMEM); + goto err_phys; } sg = st->sgl; sg->offset = 0; sg->length = obj->base.size; - sg_dma_address(sg) = obj->phys_handle->busaddr; + sg_dma_address(sg) = phys->busaddr; sg_dma_len(sg) = obj->base.size; + obj->phys_handle = phys; + return st; + +err_phys: + drm_pci_free(obj->base.dev, phys); return st; } @@ -274,12 +296,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, sg_free_table(pages); kfree(pages); + + drm_pci_free(obj->base.dev, obj->phys_handle); } static void i915_gem_object_release_phys(struct drm_i915_gem_object *obj) { - drm_pci_free(obj->base.dev, obj->phys_handle); i915_gem_object_unpin_pages(obj); } @@ -540,15 +563,13 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) { - drm_dma_handle_t *phys; int ret; - if (obj->phys_handle) { - if ((unsigned long)obj->phys_handle->vaddr & (align -1)) - return -EBUSY; + if (align > obj->base.size) + return -EINVAL; + if (obj->ops == &i915_gem_phys_ops) return 0; - } if (obj->mm.madv != I915_MADV_WILLNEED) return -EFAULT; @@ -564,12 +585,6 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, if (obj->mm.pages) return -EBUSY; - /* create a new object */ - phys = drm_pci_alloc(obj->base.dev, obj->base.size, align); - if (!phys) - return -ENOMEM; - - obj->phys_handle = phys; obj->ops = &i915_gem_phys_ops; return i915_gem_object_pin_pages(obj); -- cgit v1.2.3 From 7155b057c61e55eed27bf478ac9b8034c4c97582 Mon Sep 17 
00:00:00 2001 From: Chris Wilson Date: Fri, 9 Dec 2016 15:05:55 +0000 Subject: drm/i915: Retire before attempting to evict from the active lists Some objects retain an extra pin whilst they are active (e.g. contexts). This excludes them from being considered for eviction unless we idle the GPU. If we retire requests before searching the active list, we can hopefully remove a few excess pins and reduce the amount of searching required. v2: Similar principle applies to evict_for_vma Signed-off-by: Chris Wilson Link: http://patchwork.freedesktop.org/patch/msgid/20161209150555.602-1-chris@chris-wilson.co.uk Reviewed-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_gem_evict.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index fa40100146ea..6457fd0c33a8 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -129,7 +129,14 @@ i915_gem_evict_something(struct i915_address_space *vm, } else drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); - if (flags & PIN_NONBLOCK) + /* Retire before we search the active list. Although we have + * reasonable accuracy in our retirement lists, we may have + * a stray pin (preventing eviction) that can only be resolved by + * retiring. + */ + if (!(flags & PIN_NONBLOCK)) + i915_gem_retire_requests(dev_priv); + else phases[1] = NULL; search_again: @@ -235,6 +242,14 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags) lockdep_assert_held(&target->vm->i915->drm.struct_mutex); trace_i915_gem_evict_vma(target, flags); + /* Retire before we search the active list. Although we have + * reasonable accuracy in our retirement lists, we may have + * a stray pin (preventing eviction) that can only be resolved by + * retiring. + */ + if (!(flags & PIN_NONBLOCK)) + i915_gem_retire_requests(target->vm->i915); + check_color = target->vm->mm.color_adjust; if (check_color) { /* Expand search to cover neighbouring guard pages (or lack!) */ -- cgit v1.2.3 From add033793c9efa7b0a8ecd2fa192fe85aa8b0854 Mon Sep 17 00:00:00 2001 From: Vidya Srinivas Date: Thu, 8 Dec 2016 11:26:18 +0200 Subject: drm/i915: Parse panel backlight controller from VBT Currently the backlight controller is taken as 0. It needs to derive its value from the VBT. Add the necessary changes.
v2 by Jani: - drop obsolete comments, drop redundant initialization (Bob) - merge debug logging into one Signed-off-by: Uma Shankar Signed-off-by: Vidya Srinivas Reviewed-by: Bob Paauwe Tested-by: Bob Paauwe Tested-by: Mika Kahola Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/1481189178-426-1-git-send-email-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_bios.c | 6 ++++-- drivers/gpu/drm/i915/intel_panel.c | 17 +++-------------- 3 files changed, 8 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 40c55c9c9eb4..20bc04d5e617 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1716,6 +1716,7 @@ struct intel_vbt_data { bool present; bool active_low_pwm; u8 min_brightness; /* min_brightness/255 of max */ + u8 controller; /* brightness controller number */ enum intel_backlight_type type; } backlight; diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index eaade27af386..1cf2fa6ec28f 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -330,17 +330,19 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, method = &backlight_data->backlight_control[panel_type]; dev_priv->vbt.backlight.type = method->type; + dev_priv->vbt.backlight.controller = method->controller; } dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; dev_priv->vbt.backlight.min_brightness = entry->min_brightness; DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, " - "active %s, min brightness %u, level %u\n", + "active %s, min brightness %u, level %u, controller %u\n", dev_priv->vbt.backlight.pwm_freq_hz, dev_priv->vbt.backlight.active_low_pwm ? "low" : "high", dev_priv->vbt.backlight.min_brightness, - backlight_data->level[panel_type]); + backlight_data->level[panel_type], + dev_priv->vbt.backlight.controller); } /* Try to find sdvo panel data */ diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 3578b402d412..b01edceac650 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1039,10 +1039,7 @@ static void bxt_enable_backlight(struct intel_connector *connector) enum pipe pipe = intel_get_pipe_from_connector(connector); u32 pwm_ctl, val; - /* To use 2nd set of backlight registers, utility pin has to be - * enabled with PWM mode. - * The field should only be changed when the utility pin is disabled - */ + /* Controller 1 uses the utility pin. */ if (panel->backlight.controller == 1) { val = I915_READ(UTIL_PIN_CTL); if (val & UTIL_PIN_ENABLE) { @@ -1608,19 +1605,11 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) struct intel_panel *panel = &connector->panel; u32 pwm_ctl, val; - /* - * For BXT hard coding the Backlight controller to 0. - * TODO : Read the controller value from VBT and generalize - */ - panel->backlight.controller = 0; + panel->backlight.controller = dev_priv->vbt.backlight.controller; pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); - /* Keeping the check if controller 1 is to be programmed. - * This will come into affect once the VBT parsing - * is fixed for controller selection, and controller 1 is used - * for a prticular display configuration. - */ + /* Controller 1 uses the utility pin. 
*/ if (panel->backlight.controller == 1) { val = I915_READ(UTIL_PIN_CTL); panel->backlight.util_pin_active_low = -- cgit v1.2.3 From 5acc614ac47465fee6375a9af4740f618830762d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 10 Dec 2016 22:52:52 +0100 Subject: drm: Protect master->unique with dev->master_mutex No one looks at the major/minor versions except the unique/busid stuff. If we protect that with the master_mutex (since it also affects the unique of each master, oh well) we can mark these two IOCTL with DRM_UNLOCKED. While doing this I realized that the comment for the magic_map is outdated, I've forgotten to update it in: commit d2b34ee62b409a03c6fe43c07b779983be51d017 Author: Daniel Vetter Date: Fri Jun 17 09:33:21 2016 +0200 drm: Protect authmagic with master_mutex Cc: Chris Wilson Cc: Emil Velikov Reviewed-by: Chris Wilson Reviewed-by: Emil Velikov Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161210215255.7765-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_ioctl.c | 12 +++++++++--- include/drm/drm_auth.h | 17 +++++++++++++---- 2 files changed, 22 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index fed22c2b98b6..daacef9bf902 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -115,11 +115,15 @@ static int drm_getunique(struct drm_device *dev, void *data, struct drm_unique *u = data; struct drm_master *master = file_priv->master; + mutex_lock(&master->dev->master_mutex); if (u->unique_len >= master->unique_len) { - if (copy_to_user(u->unique, master->unique, master->unique_len)) + if (copy_to_user(u->unique, master->unique, master->unique_len)) { + mutex_unlock(&master->dev->master_mutex); return -EFAULT; + } } u->unique_len = master->unique_len; + mutex_unlock(&master->dev->master_mutex); return 0; } @@ -340,6 +344,7 @@ static int drm_setversion(struct drm_device *dev, void *data, struct drm_file *f struct drm_set_version *sv = data; int if_version, retcode = 0; + mutex_lock(&dev->master_mutex); if (sv->drm_di_major != -1) { if (sv->drm_di_major != DRM_IF_MAJOR || sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) { @@ -374,6 +379,7 @@ done: sv->drm_di_minor = DRM_IF_MINOR; sv->drm_dd_major = dev->driver->major; sv->drm_dd_minor = dev->driver->minor; + mutex_unlock(&dev->master_mutex); return retcode; } @@ -528,7 +534,7 @@ EXPORT_SYMBOL(drm_ioctl_permit); static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED), @@ -536,7 +542,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0), - DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_UNLOCKED | DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), diff --git 
a/include/drm/drm_auth.h b/include/drm/drm_auth.h index 610223b0481b..155588eb8ccf 100644 --- a/include/drm/drm_auth.h +++ b/include/drm/drm_auth.h @@ -33,10 +33,7 @@ * * @refcount: Refcount for this master object. * @dev: Link back to the DRM device - * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex. - * @unique_len: Length of unique field. Protected by drm_global_mutex. - * @magic_map: Map of used authentication tokens. Protected by struct_mutex. - * @lock: DRI lock information. + * @lock: DRI1 lock information. * @driver_priv: Pointer to driver-private information. * * Note that master structures are only relevant for the legacy/primary device @@ -45,8 +42,20 @@ struct drm_master { struct kref refcount; struct drm_device *dev; + /** + * @unique: Unique identifier: e.g. busid. Protected by struct + * &drm_device master_mutex. + */ char *unique; + /** + * @unique_len: Length of unique field. Protected by struct &drm_device + * master_mutex. + */ int unique_len; + /** + * @magic_map: Map of used authentication tokens. Protected by struct + * &drm_device master_mutex. + */ struct idr magic_map; struct drm_lock_data lock; void *driver_priv; -- cgit v1.2.3 From dcf727ab5d1702abc9510a1a91afbd671e410b52 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 10 Dec 2016 22:52:53 +0100 Subject: drm: setclientcap doesn't need the drm BKL It only updates per-file feature flags. And all the ioctls which change behaviour depending upon these flags (they're all kms features) do _not_ hold the BKL. Therefore this is pure cargo-cult and can be removed. Note that there's a risk that the ioctl will behave inconsistently when userspace is racing with itself, but that's ok. The only thing it's not allowed to do is oops the kernel, and from an audit all places are safe. v2: Clarify that the inconsistency is only when userspace races (Chris). Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161210215255.7765-2-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index daacef9bf902..18329fc91c80 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -541,7 +541,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0), + DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_UNLOCKED | DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), -- cgit v1.2.3 From fdd5b877e9ebc2029e1373b4a3cd057329a9ab7a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 10 Dec 2016 22:52:54 +0100 Subject: drm: Enforce BKL-less ioctls for modern drivers With the last round of changes all ioctls called by modern drivers now have their own locking. Everything else is only allowed for legacy drivers and hence the lack of locking doesn't matter. One exception is nouveau, due to the DRIVER_KMS_LEGACY_CONTEXT flag. But that only works its magic on the context and bufs ioctls. And drm_bufs.c is protected with dev->struct_mutex, and drm_context.c by the same and dev->ctxlist_mutex.
That should be all safe, and we can finally mandate drm-bkl-less ioctls for everyone! Reviewed-by: Chris Wilson Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161210215255.7765-3-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_ioctl.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 18329fc91c80..59b691146033 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -735,9 +735,8 @@ long drm_ioctl(struct file *filp, if (ksize > in_size) memset(kdata + in_size, 0, ksize - in_size); - /* Enforce sane locking for modern driver ioctls. Core ioctls are - * too messy still. */ - if ((!drm_core_check_feature(dev, DRIVER_LEGACY) && is_driver_ioctl) || + /* Enforce sane locking for modern driver ioctls. */ + if (!drm_core_check_feature(dev, DRIVER_LEGACY) || (ioctl->flags & DRM_UNLOCKED)) retcode = func(dev, kdata, file_priv); else { -- cgit v1.2.3 From 731035fe8e32de31d3f390370349e843caf1e6b8 Mon Sep 17 00:00:00 2001 From: Tomeu Vizoso Date: Mon, 12 Dec 2016 13:29:48 +0100 Subject: drm/i915/debugfs: Move out pipe CRC code In preparation for using a generic API in the DRM core for continuous CRC generation, move the related code out of i915_debugfs.c into a new file. Eventually, only the Intel-specific code will remain in this new file. v2: Rebased. v6: Rebased. v7: Fix whitespace issue. v9: Have intel_display_crc_init accept a drm_i915_private instead. v12: Rebased. Signed-off-by: Tomeu Vizoso Reviewed-by: Emil Velikov Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1481545788-18194-1-git-send-email-tomeu.vizoso@collabora.com --- drivers/gpu/drm/i915/Makefile | 2 +- drivers/gpu/drm/i915/i915_debugfs.c | 882 +------------------------------ drivers/gpu/drm/i915/intel_drv.h | 5 + drivers/gpu/drm/i915/intel_pipe_crc.c | 939 ++++++++++++++++++++++++++++++++++ 4 files changed, 949 insertions(+), 879 deletions(-) create mode 100644 drivers/gpu/drm/i915/intel_pipe_crc.c (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 3c30916727fb..5196509e71cf 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -24,7 +24,7 @@ i915-y := i915_drv.o \ intel_runtime_pm.o i915-$(CONFIG_COMPAT) += i915_ioc32.o -i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o +i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o # GEM code i915-y += i915_cmd_parser.o \ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 54e196d9d83e..1fad1235a0c7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -26,19 +26,9 @@ * */ -#include -#include -#include #include -#include -#include #include -#include -#include #include "intel_drv.h" -#include "intel_ringbuffer.h" -#include -#include "i915_drv.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { @@ -3546,12 +3536,6 @@ static int i915_drrs_status(struct seq_file *m, void *unused) return 0; } -struct pipe_crc_info { - const char *name; - struct drm_i915_private *dev_priv; - enum pipe pipe; -}; - static int i915_dp_mst_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -3581,844 +3565,6 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) return 0; } -static int i915_pipe_crc_open(struct inode *inode, struct file *filep) -{ - struct pipe_crc_info *info
= inode->i_private; - struct drm_i915_private *dev_priv = info->dev_priv; - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; - - if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes) - return -ENODEV; - - spin_lock_irq(&pipe_crc->lock); - - if (pipe_crc->opened) { - spin_unlock_irq(&pipe_crc->lock); - return -EBUSY; /* already open */ - } - - pipe_crc->opened = true; - filep->private_data = inode->i_private; - - spin_unlock_irq(&pipe_crc->lock); - - return 0; -} - -static int i915_pipe_crc_release(struct inode *inode, struct file *filep) -{ - struct pipe_crc_info *info = inode->i_private; - struct drm_i915_private *dev_priv = info->dev_priv; - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; - - spin_lock_irq(&pipe_crc->lock); - pipe_crc->opened = false; - spin_unlock_irq(&pipe_crc->lock); - - return 0; -} - -/* (6 fields, 8 chars each, space separated (5) + '\n') */ -#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) -/* account for \'0' */ -#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) - -static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) -{ - assert_spin_locked(&pipe_crc->lock); - return CIRC_CNT(pipe_crc->head, pipe_crc->tail, - INTEL_PIPE_CRC_ENTRIES_NR); -} - -static ssize_t -i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, - loff_t *pos) -{ - struct pipe_crc_info *info = filep->private_data; - struct drm_i915_private *dev_priv = info->dev_priv; - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; - char buf[PIPE_CRC_BUFFER_LEN]; - int n_entries; - ssize_t bytes_read; - - /* - * Don't allow user space to provide buffers not big enough to hold - * a line of data. - */ - if (count < PIPE_CRC_LINE_LEN) - return -EINVAL; - - if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) - return 0; - - /* nothing to read */ - spin_lock_irq(&pipe_crc->lock); - while (pipe_crc_data_count(pipe_crc) == 0) { - int ret; - - if (filep->f_flags & O_NONBLOCK) { - spin_unlock_irq(&pipe_crc->lock); - return -EAGAIN; - } - - ret = wait_event_interruptible_lock_irq(pipe_crc->wq, - pipe_crc_data_count(pipe_crc), pipe_crc->lock); - if (ret) { - spin_unlock_irq(&pipe_crc->lock); - return ret; - } - } - - /* We now have one or more entries to read */ - n_entries = count / PIPE_CRC_LINE_LEN; - - bytes_read = 0; - while (n_entries > 0) { - struct intel_pipe_crc_entry *entry = - &pipe_crc->entries[pipe_crc->tail]; - - if (CIRC_CNT(pipe_crc->head, pipe_crc->tail, - INTEL_PIPE_CRC_ENTRIES_NR) < 1) - break; - - BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); - pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); - - bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, - "%8u %8x %8x %8x %8x %8x\n", - entry->frame, entry->crc[0], - entry->crc[1], entry->crc[2], - entry->crc[3], entry->crc[4]); - - spin_unlock_irq(&pipe_crc->lock); - - if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN)) - return -EFAULT; - - user_buf += PIPE_CRC_LINE_LEN; - n_entries--; - - spin_lock_irq(&pipe_crc->lock); - } - - spin_unlock_irq(&pipe_crc->lock); - - return bytes_read; -} - -static const struct file_operations i915_pipe_crc_fops = { - .owner = THIS_MODULE, - .open = i915_pipe_crc_open, - .read = i915_pipe_crc_read, - .release = i915_pipe_crc_release, -}; - -static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { - { - .name = "i915_pipe_A_crc", - .pipe = PIPE_A, - }, - { - .name = "i915_pipe_B_crc", - .pipe = PIPE_B, - }, - { - .name = "i915_pipe_C_crc", - .pipe = PIPE_C, - }, -}; - -static int 
i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, - enum pipe pipe) -{ - struct drm_i915_private *dev_priv = to_i915(minor->dev); - struct dentry *ent; - struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; - - info->dev_priv = dev_priv; - ent = debugfs_create_file(info->name, S_IRUGO, root, info, - &i915_pipe_crc_fops); - if (!ent) - return -ENOMEM; - - return drm_add_fake_info_node(minor, ent, info); -} - -static const char * const pipe_crc_sources[] = { - "none", - "plane1", - "plane2", - "pf", - "pipe", - "TV", - "DP-B", - "DP-C", - "DP-D", - "auto", -}; - -static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) -{ - BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); - return pipe_crc_sources[source]; -} - -static int display_crc_ctl_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - int i; - - for (i = 0; i < I915_MAX_PIPES; i++) - seq_printf(m, "%c %s\n", pipe_name(i), - pipe_crc_source_name(dev_priv->pipe_crc[i].source)); - - return 0; -} - -static int display_crc_ctl_open(struct inode *inode, struct file *file) -{ - return single_open(file, display_crc_ctl_show, inode->i_private); -} - -static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, - uint32_t *val) -{ - if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) - *source = INTEL_PIPE_CRC_SOURCE_PIPE; - - switch (*source) { - case INTEL_PIPE_CRC_SOURCE_PIPE: - *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; - break; - case INTEL_PIPE_CRC_SOURCE_NONE: - *val = 0; - break; - default: - return -EINVAL; - } - - return 0; -} - -static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, - enum pipe pipe, - enum intel_pipe_crc_source *source) -{ - struct drm_device *dev = &dev_priv->drm; - struct intel_encoder *encoder; - struct intel_crtc *crtc; - struct intel_digital_port *dig_port; - int ret = 0; - - *source = INTEL_PIPE_CRC_SOURCE_PIPE; - - drm_modeset_lock_all(dev); - for_each_intel_encoder(dev, encoder) { - if (!encoder->base.crtc) - continue; - - crtc = to_intel_crtc(encoder->base.crtc); - - if (crtc->pipe != pipe) - continue; - - switch (encoder->type) { - case INTEL_OUTPUT_TVOUT: - *source = INTEL_PIPE_CRC_SOURCE_TV; - break; - case INTEL_OUTPUT_DP: - case INTEL_OUTPUT_EDP: - dig_port = enc_to_dig_port(&encoder->base); - switch (dig_port->port) { - case PORT_B: - *source = INTEL_PIPE_CRC_SOURCE_DP_B; - break; - case PORT_C: - *source = INTEL_PIPE_CRC_SOURCE_DP_C; - break; - case PORT_D: - *source = INTEL_PIPE_CRC_SOURCE_DP_D; - break; - default: - WARN(1, "nonexisting DP port %c\n", - port_name(dig_port->port)); - break; - } - break; - default: - break; - } - } - drm_modeset_unlock_all(dev); - - return ret; -} - -static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, - enum pipe pipe, - enum intel_pipe_crc_source *source, - uint32_t *val) -{ - bool need_stable_symbols = false; - - if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { - int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source); - if (ret) - return ret; - } - - switch (*source) { - case INTEL_PIPE_CRC_SOURCE_PIPE: - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; - break; - case INTEL_PIPE_CRC_SOURCE_DP_B: - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; - need_stable_symbols = true; - break; - case INTEL_PIPE_CRC_SOURCE_DP_C: - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; - need_stable_symbols = true; - break; - case INTEL_PIPE_CRC_SOURCE_DP_D: - if (!IS_CHERRYVIEW(dev_priv)) - return -EINVAL; - *val = PIPE_CRC_ENABLE | 
PIPE_CRC_SOURCE_DP_D_VLV; - need_stable_symbols = true; - break; - case INTEL_PIPE_CRC_SOURCE_NONE: - *val = 0; - break; - default: - return -EINVAL; - } - - /* - * When the pipe CRC tap point is after the transcoders we need - * to tweak symbol-level features to produce a deterministic series of - * symbols for a given frame. We need to reset those features only once - * a frame (instead of every nth symbol): - * - DC-balance: used to ensure a better clock recovery from the data - * link (SDVO) - * - DisplayPort scrambling: used for EMI reduction - */ - if (need_stable_symbols) { - uint32_t tmp = I915_READ(PORT_DFT2_G4X); - - tmp |= DC_BALANCE_RESET_VLV; - switch (pipe) { - case PIPE_A: - tmp |= PIPE_A_SCRAMBLE_RESET; - break; - case PIPE_B: - tmp |= PIPE_B_SCRAMBLE_RESET; - break; - case PIPE_C: - tmp |= PIPE_C_SCRAMBLE_RESET; - break; - default: - return -EINVAL; - } - I915_WRITE(PORT_DFT2_G4X, tmp); - } - - return 0; -} - -static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, - enum pipe pipe, - enum intel_pipe_crc_source *source, - uint32_t *val) -{ - bool need_stable_symbols = false; - - if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { - int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source); - if (ret) - return ret; - } - - switch (*source) { - case INTEL_PIPE_CRC_SOURCE_PIPE: - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; - break; - case INTEL_PIPE_CRC_SOURCE_TV: - if (!SUPPORTS_TV(dev_priv)) - return -EINVAL; - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; - break; - case INTEL_PIPE_CRC_SOURCE_DP_B: - if (!IS_G4X(dev_priv)) - return -EINVAL; - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; - need_stable_symbols = true; - break; - case INTEL_PIPE_CRC_SOURCE_DP_C: - if (!IS_G4X(dev_priv)) - return -EINVAL; - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; - need_stable_symbols = true; - break; - case INTEL_PIPE_CRC_SOURCE_DP_D: - if (!IS_G4X(dev_priv)) - return -EINVAL; - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; - need_stable_symbols = true; - break; - case INTEL_PIPE_CRC_SOURCE_NONE: - *val = 0; - break; - default: - return -EINVAL; - } - - /* - * When the pipe CRC tap point is after the transcoders we need - * to tweak symbol-level features to produce a deterministic series of - * symbols for a given frame. 
We need to reset those features only once - * a frame (instead of every nth symbol): - * - DC-balance: used to ensure a better clock recovery from the data - * link (SDVO) - * - DisplayPort scrambling: used for EMI reduction - */ - if (need_stable_symbols) { - uint32_t tmp = I915_READ(PORT_DFT2_G4X); - - WARN_ON(!IS_G4X(dev_priv)); - - I915_WRITE(PORT_DFT_I9XX, - I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); - - if (pipe == PIPE_A) - tmp |= PIPE_A_SCRAMBLE_RESET; - else - tmp |= PIPE_B_SCRAMBLE_RESET; - - I915_WRITE(PORT_DFT2_G4X, tmp); - } - - return 0; -} - -static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - uint32_t tmp = I915_READ(PORT_DFT2_G4X); - - switch (pipe) { - case PIPE_A: - tmp &= ~PIPE_A_SCRAMBLE_RESET; - break; - case PIPE_B: - tmp &= ~PIPE_B_SCRAMBLE_RESET; - break; - case PIPE_C: - tmp &= ~PIPE_C_SCRAMBLE_RESET; - break; - default: - return; - } - if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) - tmp &= ~DC_BALANCE_RESET_VLV; - I915_WRITE(PORT_DFT2_G4X, tmp); - -} - -static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - uint32_t tmp = I915_READ(PORT_DFT2_G4X); - - if (pipe == PIPE_A) - tmp &= ~PIPE_A_SCRAMBLE_RESET; - else - tmp &= ~PIPE_B_SCRAMBLE_RESET; - I915_WRITE(PORT_DFT2_G4X, tmp); - - if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { - I915_WRITE(PORT_DFT_I9XX, - I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); - } -} - -static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, - uint32_t *val) -{ - if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) - *source = INTEL_PIPE_CRC_SOURCE_PIPE; - - switch (*source) { - case INTEL_PIPE_CRC_SOURCE_PLANE1: - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; - break; - case INTEL_PIPE_CRC_SOURCE_PLANE2: - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; - break; - case INTEL_PIPE_CRC_SOURCE_PIPE: - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; - break; - case INTEL_PIPE_CRC_SOURCE_NONE: - *val = 0; - break; - default: - return -EINVAL; - } - - return 0; -} - -static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv, - bool enable) -{ - struct drm_device *dev = &dev_priv->drm; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); - struct intel_crtc_state *pipe_config; - struct drm_atomic_state *state; - int ret = 0; - - drm_modeset_lock_all(dev); - state = drm_atomic_state_alloc(dev); - if (!state) { - ret = -ENOMEM; - goto out; - } - - state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base); - pipe_config = intel_atomic_get_crtc_state(state, crtc); - if (IS_ERR(pipe_config)) { - ret = PTR_ERR(pipe_config); - goto out; - } - - pipe_config->pch_pfit.force_thru = enable; - if (pipe_config->cpu_transcoder == TRANSCODER_EDP && - pipe_config->pch_pfit.enabled != enable) - pipe_config->base.connectors_changed = true; - - ret = drm_atomic_commit(state); -out: - WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret); - drm_modeset_unlock_all(dev); - drm_atomic_state_put(state); -} - -static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, - enum pipe pipe, - enum intel_pipe_crc_source *source, - uint32_t *val) -{ - if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) - *source = INTEL_PIPE_CRC_SOURCE_PF; - - switch (*source) { - case INTEL_PIPE_CRC_SOURCE_PLANE1: - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; - break; - case INTEL_PIPE_CRC_SOURCE_PLANE2: - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; - break; - case INTEL_PIPE_CRC_SOURCE_PF: - if (IS_HASWELL(dev_priv) && pipe 
== PIPE_A) - hsw_trans_edp_pipe_A_crc_wa(dev_priv, true); - - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; - break; - case INTEL_PIPE_CRC_SOURCE_NONE: - *val = 0; - break; - default: - return -EINVAL; - } - - return 0; -} - -static int pipe_crc_set_source(struct drm_i915_private *dev_priv, - enum pipe pipe, - enum intel_pipe_crc_source source) -{ - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - enum intel_display_power_domain power_domain; - u32 val = 0; /* shut up gcc */ - int ret; - - if (pipe_crc->source == source) - return 0; - - /* forbid changing the source without going back to 'none' */ - if (pipe_crc->source && source) - return -EINVAL; - - power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) { - DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); - return -EIO; - } - - if (IS_GEN2(dev_priv)) - ret = i8xx_pipe_crc_ctl_reg(&source, &val); - else if (INTEL_GEN(dev_priv) < 5) - ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val); - else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) - ret = ilk_pipe_crc_ctl_reg(&source, &val); - else - ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val); - - if (ret != 0) - goto out; - - /* none -> real source transition */ - if (source) { - struct intel_pipe_crc_entry *entries; - - DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", - pipe_name(pipe), pipe_crc_source_name(source)); - - entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, - sizeof(pipe_crc->entries[0]), - GFP_KERNEL); - if (!entries) { - ret = -ENOMEM; - goto out; - } - - /* - * When IPS gets enabled, the pipe CRC changes. Since IPS gets - * enabled and disabled dynamically based on package C states, - * user space can't make reliable use of the CRCs, so let's just - * completely disable it. 
- */ - hsw_disable_ips(crtc); - - spin_lock_irq(&pipe_crc->lock); - kfree(pipe_crc->entries); - pipe_crc->entries = entries; - pipe_crc->head = 0; - pipe_crc->tail = 0; - spin_unlock_irq(&pipe_crc->lock); - } - - pipe_crc->source = source; - - I915_WRITE(PIPE_CRC_CTL(pipe), val); - POSTING_READ(PIPE_CRC_CTL(pipe)); - - /* real source -> none transition */ - if (source == INTEL_PIPE_CRC_SOURCE_NONE) { - struct intel_pipe_crc_entry *entries; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, - pipe); - - DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", - pipe_name(pipe)); - - drm_modeset_lock(&crtc->base.mutex, NULL); - if (crtc->base.state->active) - intel_wait_for_vblank(dev_priv, pipe); - drm_modeset_unlock(&crtc->base.mutex); - - spin_lock_irq(&pipe_crc->lock); - entries = pipe_crc->entries; - pipe_crc->entries = NULL; - pipe_crc->head = 0; - pipe_crc->tail = 0; - spin_unlock_irq(&pipe_crc->lock); - - kfree(entries); - - if (IS_G4X(dev_priv)) - g4x_undo_pipe_scramble_reset(dev_priv, pipe); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_undo_pipe_scramble_reset(dev_priv, pipe); - else if (IS_HASWELL(dev_priv) && pipe == PIPE_A) - hsw_trans_edp_pipe_A_crc_wa(dev_priv, false); - - hsw_enable_ips(crtc); - } - - ret = 0; - -out: - intel_display_power_put(dev_priv, power_domain); - - return ret; -} - -/* - * Parse pipe CRC command strings: - * command: wsp* object wsp+ name wsp+ source wsp* - * object: 'pipe' - * name: (A | B | C) - * source: (none | plane1 | plane2 | pf) - * wsp: (#0x20 | #0x9 | #0xA)+ - * - * eg.: - * "pipe A plane1" -> Start CRC computations on plane1 of pipe A - * "pipe A none" -> Stop CRC - */ -static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) -{ - int n_words = 0; - - while (*buf) { - char *end; - - /* skip leading white space */ - buf = skip_spaces(buf); - if (!*buf) - break; /* end of buffer */ - - /* find end of word */ - for (end = buf; *end && !isspace(*end); end++) - ; - - if (n_words == max_words) { - DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", - max_words); - return -EINVAL; /* ran out of words[] before bytes */ - } - - if (*end) - *end++ = '\0'; - words[n_words++] = buf; - buf = end; - } - - return n_words; -} - -enum intel_pipe_crc_object { - PIPE_CRC_OBJECT_PIPE, -}; - -static const char * const pipe_crc_objects[] = { - "pipe", -}; - -static int -display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) - if (!strcmp(buf, pipe_crc_objects[i])) { - *o = i; - return 0; - } - - return -EINVAL; -} - -static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) -{ - const char name = buf[0]; - - if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) - return -EINVAL; - - *pipe = name - 'A'; - - return 0; -} - -static int -display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) - if (!strcmp(buf, pipe_crc_sources[i])) { - *s = i; - return 0; - } - - return -EINVAL; -} - -static int display_crc_ctl_parse(struct drm_i915_private *dev_priv, - char *buf, size_t len) -{ -#define N_WORDS 3 - int n_words; - char *words[N_WORDS]; - enum pipe pipe; - enum intel_pipe_crc_object object; - enum intel_pipe_crc_source source; - - n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); - if (n_words != N_WORDS) { - DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", - N_WORDS); - return -EINVAL; - } - - if 
(display_crc_ctl_parse_object(words[0], &object) < 0) { - DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); - return -EINVAL; - } - - if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { - DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); - return -EINVAL; - } - - if (display_crc_ctl_parse_source(words[2], &source) < 0) { - DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); - return -EINVAL; - } - - return pipe_crc_set_source(dev_priv, pipe, source); -} - -static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - char *tmpbuf; - int ret; - - if (len == 0) - return 0; - - if (len > PAGE_SIZE - 1) { - DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", - PAGE_SIZE); - return -E2BIG; - } - - tmpbuf = kmalloc(len + 1, GFP_KERNEL); - if (!tmpbuf) - return -ENOMEM; - - if (copy_from_user(tmpbuf, ubuf, len)) { - ret = -EFAULT; - goto out; - } - tmpbuf[len] = '\0'; - - ret = display_crc_ctl_parse(dev_priv, tmpbuf, len); - -out: - kfree(tmpbuf); - if (ret < 0) - return ret; - - *offp += len; - return len; -} - -static const struct file_operations i915_display_crc_ctl_fops = { - .owner = THIS_MODULE, - .open = display_crc_ctl_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = display_crc_ctl_write -}; - static ssize_t i915_displayport_test_active_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) @@ -5469,19 +4615,6 @@ static const struct i915_debugfs_files { {"i915_guc_log_control", &i915_guc_log_control_fops} }; -void intel_display_crc_init(struct drm_i915_private *dev_priv) -{ - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) { - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; - - pipe_crc->opened = false; - spin_lock_init(&pipe_crc->lock); - init_waitqueue_head(&pipe_crc->wq); - } -} - int i915_debugfs_register(struct drm_i915_private *dev_priv) { struct drm_minor *minor = dev_priv->drm.primary; @@ -5491,11 +4624,9 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv) if (ret) return ret; - for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { - ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); - if (ret) - return ret; - } + ret = intel_pipe_crc_create(minor); + if (ret) + return ret; for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { ret = i915_debugfs_create(minor->debugfs_root, minor, @@ -5521,12 +4652,7 @@ void i915_debugfs_unregister(struct drm_i915_private *dev_priv) drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops, 1, minor); - for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { - struct drm_info_list *info_list = - (struct drm_info_list *)&i915_pipe_crc_data[i]; - - drm_debugfs_remove_files(info_list, 1, minor); - } + intel_pipe_crc_cleanup(minor); for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { struct drm_info_list *info_list = diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8f4ddca0f521..223d44c4aa93 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1855,4 +1855,9 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state); bool lspcon_init(struct intel_digital_port *intel_dig_port); void lspcon_resume(struct intel_lspcon *lspcon); void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); + +/* intel_pipe_crc.c */ +int intel_pipe_crc_create(struct drm_minor *minor); +void intel_pipe_crc_cleanup(struct drm_minor 
*minor); +extern const struct file_operations i915_display_crc_ctl_fops; #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c new file mode 100644 index 000000000000..ef0c0e195164 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_pipe_crc.c @@ -0,0 +1,939 @@ +/* + * Copyright © 2013 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Author: Damien Lespiau + * + */ + +#include +#include +#include +#include +#include "intel_drv.h" + +struct pipe_crc_info { + const char *name; + struct drm_i915_private *dev_priv; + enum pipe pipe; +}; + +/* As the drm_debugfs_init() routines are called before dev->dev_private is + * allocated we need to hook into the minor for release. 
+ */ +static int drm_add_fake_info_node(struct drm_minor *minor, + struct dentry *ent, const void *key) +{ + struct drm_info_node *node; + + node = kmalloc(sizeof(*node), GFP_KERNEL); + if (node == NULL) { + debugfs_remove(ent); + return -ENOMEM; + } + + node->minor = minor; + node->dent = ent; + node->info_ent = (void *) key; + + mutex_lock(&minor->debugfs_lock); + list_add(&node->list, &minor->debugfs_list); + mutex_unlock(&minor->debugfs_lock); + + return 0; +} + +static int i915_pipe_crc_open(struct inode *inode, struct file *filep) +{ + struct pipe_crc_info *info = inode->i_private; + struct drm_i915_private *dev_priv = info->dev_priv; + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; + + if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes) + return -ENODEV; + + spin_lock_irq(&pipe_crc->lock); + + if (pipe_crc->opened) { + spin_unlock_irq(&pipe_crc->lock); + return -EBUSY; /* already open */ + } + + pipe_crc->opened = true; + filep->private_data = inode->i_private; + + spin_unlock_irq(&pipe_crc->lock); + + return 0; +} + +static int i915_pipe_crc_release(struct inode *inode, struct file *filep) +{ + struct pipe_crc_info *info = inode->i_private; + struct drm_i915_private *dev_priv = info->dev_priv; + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; + + spin_lock_irq(&pipe_crc->lock); + pipe_crc->opened = false; + spin_unlock_irq(&pipe_crc->lock); + + return 0; +} + +/* (6 fields, 8 chars each, space separated (5) + '\n') */ +#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) +/* account for \'0' */ +#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) + +static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) +{ + assert_spin_locked(&pipe_crc->lock); + return CIRC_CNT(pipe_crc->head, pipe_crc->tail, + INTEL_PIPE_CRC_ENTRIES_NR); +} + +static ssize_t +i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, + loff_t *pos) +{ + struct pipe_crc_info *info = filep->private_data; + struct drm_i915_private *dev_priv = info->dev_priv; + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; + char buf[PIPE_CRC_BUFFER_LEN]; + int n_entries; + ssize_t bytes_read; + + /* + * Don't allow user space to provide buffers not big enough to hold + * a line of data. 
+ */ + if (count < PIPE_CRC_LINE_LEN) + return -EINVAL; + + if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) + return 0; + + /* nothing to read */ + spin_lock_irq(&pipe_crc->lock); + while (pipe_crc_data_count(pipe_crc) == 0) { + int ret; + + if (filep->f_flags & O_NONBLOCK) { + spin_unlock_irq(&pipe_crc->lock); + return -EAGAIN; + } + + ret = wait_event_interruptible_lock_irq(pipe_crc->wq, + pipe_crc_data_count(pipe_crc), pipe_crc->lock); + if (ret) { + spin_unlock_irq(&pipe_crc->lock); + return ret; + } + } + + /* We now have one or more entries to read */ + n_entries = count / PIPE_CRC_LINE_LEN; + + bytes_read = 0; + while (n_entries > 0) { + struct intel_pipe_crc_entry *entry = + &pipe_crc->entries[pipe_crc->tail]; + + if (CIRC_CNT(pipe_crc->head, pipe_crc->tail, + INTEL_PIPE_CRC_ENTRIES_NR) < 1) + break; + + BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); + pipe_crc->tail = (pipe_crc->tail + 1) & + (INTEL_PIPE_CRC_ENTRIES_NR - 1); + + bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, + "%8u %8x %8x %8x %8x %8x\n", + entry->frame, entry->crc[0], + entry->crc[1], entry->crc[2], + entry->crc[3], entry->crc[4]); + + spin_unlock_irq(&pipe_crc->lock); + + if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN)) + return -EFAULT; + + user_buf += PIPE_CRC_LINE_LEN; + n_entries--; + + spin_lock_irq(&pipe_crc->lock); + } + + spin_unlock_irq(&pipe_crc->lock); + + return bytes_read; +} + +static const struct file_operations i915_pipe_crc_fops = { + .owner = THIS_MODULE, + .open = i915_pipe_crc_open, + .read = i915_pipe_crc_read, + .release = i915_pipe_crc_release, +}; + +static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { + { + .name = "i915_pipe_A_crc", + .pipe = PIPE_A, + }, + { + .name = "i915_pipe_B_crc", + .pipe = PIPE_B, + }, + { + .name = "i915_pipe_C_crc", + .pipe = PIPE_C, + }, +}; + +static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, + enum pipe pipe) +{ + struct drm_i915_private *dev_priv = to_i915(minor->dev); + struct dentry *ent; + struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; + + info->dev_priv = dev_priv; + ent = debugfs_create_file(info->name, S_IRUGO, root, info, + &i915_pipe_crc_fops); + if (!ent) + return -ENOMEM; + + return drm_add_fake_info_node(minor, ent, info); +} + +static const char * const pipe_crc_sources[] = { + "none", + "plane1", + "plane2", + "pf", + "pipe", + "TV", + "DP-B", + "DP-C", + "DP-D", + "auto", +}; + +static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) +{ + BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); + return pipe_crc_sources[source]; +} + +static int display_crc_ctl_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + int i; + + for (i = 0; i < I915_MAX_PIPES; i++) + seq_printf(m, "%c %s\n", pipe_name(i), + pipe_crc_source_name(dev_priv->pipe_crc[i].source)); + + return 0; +} + +static int display_crc_ctl_open(struct inode *inode, struct file *file) +{ + return single_open(file, display_crc_ctl_show, inode->i_private); +} + +static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, + uint32_t *val) +{ + if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) + *source = INTEL_PIPE_CRC_SOURCE_PIPE; + + switch (*source) { + case INTEL_PIPE_CRC_SOURCE_PIPE: + *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; + break; + case INTEL_PIPE_CRC_SOURCE_NONE: + *val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, + enum pipe 
pipe, + enum intel_pipe_crc_source *source) +{ + struct drm_device *dev = &dev_priv->drm; + struct intel_encoder *encoder; + struct intel_crtc *crtc; + struct intel_digital_port *dig_port; + int ret = 0; + + *source = INTEL_PIPE_CRC_SOURCE_PIPE; + + drm_modeset_lock_all(dev); + for_each_intel_encoder(dev, encoder) { + if (!encoder->base.crtc) + continue; + + crtc = to_intel_crtc(encoder->base.crtc); + + if (crtc->pipe != pipe) + continue; + + switch (encoder->type) { + case INTEL_OUTPUT_TVOUT: + *source = INTEL_PIPE_CRC_SOURCE_TV; + break; + case INTEL_OUTPUT_DP: + case INTEL_OUTPUT_EDP: + dig_port = enc_to_dig_port(&encoder->base); + switch (dig_port->port) { + case PORT_B: + *source = INTEL_PIPE_CRC_SOURCE_DP_B; + break; + case PORT_C: + *source = INTEL_PIPE_CRC_SOURCE_DP_C; + break; + case PORT_D: + *source = INTEL_PIPE_CRC_SOURCE_DP_D; + break; + default: + WARN(1, "nonexisting DP port %c\n", + port_name(dig_port->port)); + break; + } + break; + default: + break; + } + } + drm_modeset_unlock_all(dev); + + return ret; +} + +static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, + enum pipe pipe, + enum intel_pipe_crc_source *source, + uint32_t *val) +{ + bool need_stable_symbols = false; + + if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { + int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source); + if (ret) + return ret; + } + + switch (*source) { + case INTEL_PIPE_CRC_SOURCE_PIPE: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; + break; + case INTEL_PIPE_CRC_SOURCE_DP_B: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; + need_stable_symbols = true; + break; + case INTEL_PIPE_CRC_SOURCE_DP_C: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; + need_stable_symbols = true; + break; + case INTEL_PIPE_CRC_SOURCE_DP_D: + if (!IS_CHERRYVIEW(dev_priv)) + return -EINVAL; + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV; + need_stable_symbols = true; + break; + case INTEL_PIPE_CRC_SOURCE_NONE: + *val = 0; + break; + default: + return -EINVAL; + } + + /* + * When the pipe CRC tap point is after the transcoders we need + * to tweak symbol-level features to produce a deterministic series of + * symbols for a given frame. 
We need to reset those features only once + * a frame (instead of every nth symbol): + * - DC-balance: used to ensure a better clock recovery from the data + * link (SDVO) + * - DisplayPort scrambling: used for EMI reduction + */ + if (need_stable_symbols) { + uint32_t tmp = I915_READ(PORT_DFT2_G4X); + + tmp |= DC_BALANCE_RESET_VLV; + switch (pipe) { + case PIPE_A: + tmp |= PIPE_A_SCRAMBLE_RESET; + break; + case PIPE_B: + tmp |= PIPE_B_SCRAMBLE_RESET; + break; + case PIPE_C: + tmp |= PIPE_C_SCRAMBLE_RESET; + break; + default: + return -EINVAL; + } + I915_WRITE(PORT_DFT2_G4X, tmp); + } + + return 0; +} + +static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, + enum pipe pipe, + enum intel_pipe_crc_source *source, + uint32_t *val) +{ + bool need_stable_symbols = false; + + if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { + int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source); + if (ret) + return ret; + } + + switch (*source) { + case INTEL_PIPE_CRC_SOURCE_PIPE: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; + break; + case INTEL_PIPE_CRC_SOURCE_TV: + if (!SUPPORTS_TV(dev_priv)) + return -EINVAL; + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; + break; + case INTEL_PIPE_CRC_SOURCE_DP_B: + if (!IS_G4X(dev_priv)) + return -EINVAL; + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; + need_stable_symbols = true; + break; + case INTEL_PIPE_CRC_SOURCE_DP_C: + if (!IS_G4X(dev_priv)) + return -EINVAL; + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; + need_stable_symbols = true; + break; + case INTEL_PIPE_CRC_SOURCE_DP_D: + if (!IS_G4X(dev_priv)) + return -EINVAL; + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; + need_stable_symbols = true; + break; + case INTEL_PIPE_CRC_SOURCE_NONE: + *val = 0; + break; + default: + return -EINVAL; + } + + /* + * When the pipe CRC tap point is after the transcoders we need + * to tweak symbol-level features to produce a deterministic series of + * symbols for a given frame. 
We need to reset those features only once + * a frame (instead of every nth symbol): + * - DC-balance: used to ensure a better clock recovery from the data + * link (SDVO) + * - DisplayPort scrambling: used for EMI reduction + */ + if (need_stable_symbols) { + uint32_t tmp = I915_READ(PORT_DFT2_G4X); + + WARN_ON(!IS_G4X(dev_priv)); + + I915_WRITE(PORT_DFT_I9XX, + I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); + + if (pipe == PIPE_A) + tmp |= PIPE_A_SCRAMBLE_RESET; + else + tmp |= PIPE_B_SCRAMBLE_RESET; + + I915_WRITE(PORT_DFT2_G4X, tmp); + } + + return 0; +} + +static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + uint32_t tmp = I915_READ(PORT_DFT2_G4X); + + switch (pipe) { + case PIPE_A: + tmp &= ~PIPE_A_SCRAMBLE_RESET; + break; + case PIPE_B: + tmp &= ~PIPE_B_SCRAMBLE_RESET; + break; + case PIPE_C: + tmp &= ~PIPE_C_SCRAMBLE_RESET; + break; + default: + return; + } + if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) + tmp &= ~DC_BALANCE_RESET_VLV; + I915_WRITE(PORT_DFT2_G4X, tmp); + +} + +static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + uint32_t tmp = I915_READ(PORT_DFT2_G4X); + + if (pipe == PIPE_A) + tmp &= ~PIPE_A_SCRAMBLE_RESET; + else + tmp &= ~PIPE_B_SCRAMBLE_RESET; + I915_WRITE(PORT_DFT2_G4X, tmp); + + if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { + I915_WRITE(PORT_DFT_I9XX, + I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); + } +} + +static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, + uint32_t *val) +{ + if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) + *source = INTEL_PIPE_CRC_SOURCE_PIPE; + + switch (*source) { + case INTEL_PIPE_CRC_SOURCE_PLANE1: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE2: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; + break; + case INTEL_PIPE_CRC_SOURCE_PIPE: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; + break; + case INTEL_PIPE_CRC_SOURCE_NONE: + *val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv, + bool enable) +{ + struct drm_device *dev = &dev_priv->drm; + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); + struct intel_crtc_state *pipe_config; + struct drm_atomic_state *state; + int ret = 0; + + drm_modeset_lock_all(dev); + state = drm_atomic_state_alloc(dev); + if (!state) { + ret = -ENOMEM; + goto out; + } + + state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base); + pipe_config = intel_atomic_get_crtc_state(state, crtc); + if (IS_ERR(pipe_config)) { + ret = PTR_ERR(pipe_config); + goto out; + } + + pipe_config->pch_pfit.force_thru = enable; + if (pipe_config->cpu_transcoder == TRANSCODER_EDP && + pipe_config->pch_pfit.enabled != enable) + pipe_config->base.connectors_changed = true; + + ret = drm_atomic_commit(state); +out: + WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret); + drm_modeset_unlock_all(dev); + drm_atomic_state_put(state); +} + +static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, + enum pipe pipe, + enum intel_pipe_crc_source *source, + uint32_t *val) +{ + if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) + *source = INTEL_PIPE_CRC_SOURCE_PF; + + switch (*source) { + case INTEL_PIPE_CRC_SOURCE_PLANE1: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE2: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; + break; + case INTEL_PIPE_CRC_SOURCE_PF: + if (IS_HASWELL(dev_priv) && pipe 
== PIPE_A) + hsw_trans_edp_pipe_A_crc_wa(dev_priv, true); + + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; + break; + case INTEL_PIPE_CRC_SOURCE_NONE: + *val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int pipe_crc_set_source(struct drm_i915_private *dev_priv, + enum pipe pipe, + enum intel_pipe_crc_source source) +{ + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + enum intel_display_power_domain power_domain; + u32 val = 0; /* shut up gcc */ + int ret; + + if (pipe_crc->source == source) + return 0; + + /* forbid changing the source without going back to 'none' */ + if (pipe_crc->source && source) + return -EINVAL; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) { + DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); + return -EIO; + } + + if (IS_GEN2(dev_priv)) + ret = i8xx_pipe_crc_ctl_reg(&source, &val); + else if (INTEL_GEN(dev_priv) < 5) + ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val); + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val); + else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) + ret = ilk_pipe_crc_ctl_reg(&source, &val); + else + ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val); + + if (ret != 0) + goto out; + + /* none -> real source transition */ + if (source) { + struct intel_pipe_crc_entry *entries; + + DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", + pipe_name(pipe), pipe_crc_source_name(source)); + + entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, + sizeof(pipe_crc->entries[0]), + GFP_KERNEL); + if (!entries) { + ret = -ENOMEM; + goto out; + } + + /* + * When IPS gets enabled, the pipe CRC changes. Since IPS gets + * enabled and disabled dynamically based on package C states, + * user space can't make reliable use of the CRCs, so let's just + * completely disable it. 
+ */ + hsw_disable_ips(crtc); + + spin_lock_irq(&pipe_crc->lock); + kfree(pipe_crc->entries); + pipe_crc->entries = entries; + pipe_crc->head = 0; + pipe_crc->tail = 0; + spin_unlock_irq(&pipe_crc->lock); + } + + pipe_crc->source = source; + + I915_WRITE(PIPE_CRC_CTL(pipe), val); + POSTING_READ(PIPE_CRC_CTL(pipe)); + + /* real source -> none transition */ + if (source == INTEL_PIPE_CRC_SOURCE_NONE) { + struct intel_pipe_crc_entry *entries; + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, + pipe); + + DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", + pipe_name(pipe)); + + drm_modeset_lock(&crtc->base.mutex, NULL); + if (crtc->base.state->active) + intel_wait_for_vblank(dev_priv, pipe); + drm_modeset_unlock(&crtc->base.mutex); + + spin_lock_irq(&pipe_crc->lock); + entries = pipe_crc->entries; + pipe_crc->entries = NULL; + pipe_crc->head = 0; + pipe_crc->tail = 0; + spin_unlock_irq(&pipe_crc->lock); + + kfree(entries); + + if (IS_G4X(dev_priv)) + g4x_undo_pipe_scramble_reset(dev_priv, pipe); + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + vlv_undo_pipe_scramble_reset(dev_priv, pipe); + else if (IS_HASWELL(dev_priv) && pipe == PIPE_A) + hsw_trans_edp_pipe_A_crc_wa(dev_priv, false); + + hsw_enable_ips(crtc); + } + + ret = 0; + +out: + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + +/* + * Parse pipe CRC command strings: + * command: wsp* object wsp+ name wsp+ source wsp* + * object: 'pipe' + * name: (A | B | C) + * source: (none | plane1 | plane2 | pf) + * wsp: (#0x20 | #0x9 | #0xA)+ + * + * eg.: + * "pipe A plane1" -> Start CRC computations on plane1 of pipe A + * "pipe A none" -> Stop CRC + */ +static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) +{ + int n_words = 0; + + while (*buf) { + char *end; + + /* skip leading white space */ + buf = skip_spaces(buf); + if (!*buf) + break; /* end of buffer */ + + /* find end of word */ + for (end = buf; *end && !isspace(*end); end++) + ; + + if (n_words == max_words) { + DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", + max_words); + return -EINVAL; /* ran out of words[] before bytes */ + } + + if (*end) + *end++ = '\0'; + words[n_words++] = buf; + buf = end; + } + + return n_words; +} + +enum intel_pipe_crc_object { + PIPE_CRC_OBJECT_PIPE, +}; + +static const char * const pipe_crc_objects[] = { + "pipe", +}; + +static int +display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) + if (!strcmp(buf, pipe_crc_objects[i])) { + *o = i; + return 0; + } + + return -EINVAL; +} + +static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) +{ + const char name = buf[0]; + + if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) + return -EINVAL; + + *pipe = name - 'A'; + + return 0; +} + +static int +display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) + if (!strcmp(buf, pipe_crc_sources[i])) { + *s = i; + return 0; + } + + return -EINVAL; +} + +static int display_crc_ctl_parse(struct drm_i915_private *dev_priv, + char *buf, size_t len) +{ +#define N_WORDS 3 + int n_words; + char *words[N_WORDS]; + enum pipe pipe; + enum intel_pipe_crc_object object; + enum intel_pipe_crc_source source; + + n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); + if (n_words != N_WORDS) { + DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", + N_WORDS); + return -EINVAL; + } + + if 
(display_crc_ctl_parse_object(words[0], &object) < 0) { + DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); + return -EINVAL; + } + + if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { + DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); + return -EINVAL; + } + + if (display_crc_ctl_parse_source(words[2], &source) < 0) { + DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); + return -EINVAL; + } + + return pipe_crc_set_source(dev_priv, pipe, source); +} + +static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + char *tmpbuf; + int ret; + + if (len == 0) + return 0; + + if (len > PAGE_SIZE - 1) { + DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", + PAGE_SIZE); + return -E2BIG; + } + + tmpbuf = kmalloc(len + 1, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + + if (copy_from_user(tmpbuf, ubuf, len)) { + ret = -EFAULT; + goto out; + } + tmpbuf[len] = '\0'; + + ret = display_crc_ctl_parse(dev_priv, tmpbuf, len); + +out: + kfree(tmpbuf); + if (ret < 0) + return ret; + + *offp += len; + return len; +} + +const struct file_operations i915_display_crc_ctl_fops = { + .owner = THIS_MODULE, + .open = display_crc_ctl_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = display_crc_ctl_write +}; + +void intel_display_crc_init(struct drm_i915_private *dev_priv) +{ + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; + + pipe_crc->opened = false; + spin_lock_init(&pipe_crc->lock); + init_waitqueue_head(&pipe_crc->wq); + } +} + +int intel_pipe_crc_create(struct drm_minor *minor) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { + ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); + if (ret) + return ret; + } + + return 0; +} + +void intel_pipe_crc_cleanup(struct drm_minor *minor) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { + struct drm_info_list *info_list = + (struct drm_info_list *)&i915_pipe_crc_data[i]; + + drm_debugfs_remove_files(info_list, 1, minor); + } +} -- cgit v1.2.3 From 38bf57fa7b671f2bf7530888aa8199450265c5c8 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 9 Dec 2016 15:19:38 +0100 Subject: drm/hisilicon: Don't set drm_device->platformdev It's deprecated and only should be used by drivers which still use drm_platform_init, not by anyone else. And indeed it's entirely unused and can be nuked. This required a bit more fudging, but I guess kirin_dc_ops really wants to operate on the platform_device, not something else. Also bonus points for implementing abstraction, and then storing the vfunc in a global variable. v2: Don't break the build soooo badly :( Note that the cleanup function is a bit confused: ade_data was never set as drvdata, and calling drm_crtc_cleanup directly is a bug - this is called indirectly through drm_mode_config_cleanup, which calls into crtc->destroy, which already has the call to drm_crtc_cleanup. Which means we can just nuke it. 
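For illustration (not part of the patch), a minimal sketch of why the
direct drm_crtc_cleanup() call removed here was redundant; the example_*
names are made up, only the drm_* helpers are real:

    #include <drm/drmP.h>
    #include <drm/drm_crtc.h>

    /* The driver's &drm_crtc_funcs.destroy hook. */
    static void example_crtc_destroy(struct drm_crtc *crtc)
    {
        drm_crtc_cleanup(crtc);
    }

    static void example_unload(struct drm_device *dev)
    {
        /*
         * drm_mode_config_cleanup() walks all CRTCs and calls
         * crtc->funcs->destroy(), i.e. example_crtc_destroy() above,
         * so drm_crtc_cleanup() already runs exactly once per CRTC.
         * Calling drm_crtc_cleanup() again by hand afterwards would
         * clean the same CRTC up twice, which is the confusion noted
         * above.
         */
        drm_mode_config_cleanup(dev);
    }
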
Note this is the 2nd attempt after the first one failed and had to be reverted again in commit 9cd2e854d61ccfa51686f3ed7b0c917708fc641f Author: Daniel Vetter Date: Wed Aug 17 13:59:40 2016 +0200 Revert "drm/hisilicon: Don't set drm_device->platformdev" Cc: Xinliang Liu Cc: Xinwei Kong Cc: Archit Taneja Cc: Sean Paul Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161209141944.22121-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 11 +++-------- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 8 +++----- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h | 4 ++-- 3 files changed, 8 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index afc2b5d2d5f0..3ea70459b901 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -973,9 +973,9 @@ static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx) return 0; } -static int ade_drm_init(struct drm_device *dev) +static int ade_drm_init(struct platform_device *pdev) { - struct platform_device *pdev = dev->platformdev; + struct drm_device *dev = platform_get_drvdata(pdev); struct ade_data *ade; struct ade_hw_ctx *ctx; struct ade_crtc *acrtc; @@ -1034,13 +1034,8 @@ static int ade_drm_init(struct drm_device *dev) return 0; } -static void ade_drm_cleanup(struct drm_device *dev) +static void ade_drm_cleanup(struct platform_device *pdev) { - struct platform_device *pdev = dev->platformdev; - struct ade_data *ade = platform_get_drvdata(pdev); - struct drm_crtc *crtc = &ade->acrtc.base; - - drm_crtc_cleanup(crtc); } const struct kirin_dc_ops ade_dc_ops = { diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index ebd5f4fe4c23..fa228b7b022c 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -42,7 +42,7 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev) #endif drm_kms_helper_poll_fini(dev); drm_vblank_cleanup(dev); - dc_ops->cleanup(dev); + dc_ops->cleanup(to_platform_device(dev->dev)); drm_mode_config_cleanup(dev); devm_kfree(dev->dev, priv); dev->dev_private = NULL; @@ -104,7 +104,7 @@ static int kirin_drm_kms_init(struct drm_device *dev) kirin_drm_mode_config_init(dev); /* display controller init */ - ret = dc_ops->init(dev); + ret = dc_ops->init(to_platform_device(dev->dev)); if (ret) goto err_mode_config_cleanup; @@ -138,7 +138,7 @@ static int kirin_drm_kms_init(struct drm_device *dev) err_unbind_all: component_unbind_all(dev->dev, dev); err_dc_cleanup: - dc_ops->cleanup(dev); + dc_ops->cleanup(to_platform_device(dev->dev)); err_mode_config_cleanup: drm_mode_config_cleanup(dev); devm_kfree(dev->dev, priv); @@ -209,8 +209,6 @@ static int kirin_drm_bind(struct device *dev) if (IS_ERR(drm_dev)) return PTR_ERR(drm_dev); - drm_dev->platformdev = to_platform_device(dev); - ret = kirin_drm_kms_init(drm_dev); if (ret) goto err_drm_dev_unref; diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h index 1a07caf8e7f4..a0bb217c4c64 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h @@ -15,8 +15,8 @@ /* display controller init/cleanup ops */ struct kirin_dc_ops { - int (*init)(struct drm_device *dev); - void (*cleanup)(struct drm_device 
*dev); + int (*init)(struct platform_device *pdev); + void (*cleanup)(struct platform_device *pdev); }; struct kirin_drm_private { -- cgit v1.2.3 From 24a367348a017555f982a9ee137070a7a821fa97 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 9 Dec 2016 19:53:06 +0100 Subject: dma-buf: Update kerneldoc for sync_file_create This was missed when adding a dma_fence_get call. While at it also remove the kerneldoc for the static inline helper - no point documenting internals down to every detail. Fixes: 30cd85dd6edc ("dma-buf/sync_file: hold reference to fence when creating sync_file") Cc: Gustavo Padovan Cc: Sean Paul Cc: linux-doc@vger.kernel.org Cc: Jonathan Corbet Cc: Sumit Semwal Signed-off-by: Daniel Vetter Reviewed-by: Gustavo Padovan Signed-off-by: Sumit Semwal Link: http://patchwork.freedesktop.org/patch/msgid/20161209185309.1682-3-daniel.vetter@ffwll.ch --- drivers/dma-buf/sync_file.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 6d802f2d2881..d5179d7e8575 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -67,9 +67,10 @@ static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb) * sync_file_create() - creates a sync file * @fence: fence to add to the sync_fence * - * Creates a sync_file containg @fence. Once this is called, the sync_file - * takes ownership of @fence. The sync_file can be released with - * fput(sync_file->file). Returns the sync_file or NULL in case of error. + * Creates a sync_file containg @fence. This function acquires and additional + * reference of @fence for the newly-created &sync_file, if it succeeds. The + * sync_file can be released with fput(sync_file->file). Returns the + * sync_file or NULL in case of error. */ struct sync_file *sync_file_create(struct dma_fence *fence) { @@ -90,13 +91,6 @@ struct sync_file *sync_file_create(struct dma_fence *fence) } EXPORT_SYMBOL(sync_file_create); -/** - * sync_file_fdget() - get a sync_file from an fd - * @fd: fd referencing a fence - * - * Ensures @fd references a valid sync_file, increments the refcount of the - * backing file. Returns the sync_file or NULL in case of error. - */ static struct sync_file *sync_file_fdget(int fd) { struct file *file = fget(fd); -- cgit v1.2.3 From 2904a8c1311f02896635fd35744262413a0b2726 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 9 Dec 2016 19:53:07 +0100 Subject: dma-buf: Reorganize device dma access docs - Put the initial overview for dma-buf into dma-buf.rst. - Put all the comments about detailed semantics into the right kernel-doc comment for functions or ops structure member. - To allow that detail, switch the reworked kerneldoc to inline style for dma_buf_ops. - Tie everything together into a much more streamlined overview comment, relying on the hyperlinks for all the details. - Also sprinkle some links into the kerneldoc for dma_buf and dma_buf_attachment to tie it all together. 
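For illustration (not part of this patch), a minimal importer-side sketch
of the device DMA access sequence that the reworked documentation walks
through; example_import() and its arguments are placeholders, only the
dma_buf_*() calls are real API:

    #include <linux/dma-buf.h>
    #include <linux/dma-mapping.h>
    #include <linux/err.h>

    static int example_import(int fd, struct device *dev)
    {
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        int ret = 0;

        dmabuf = dma_buf_get(fd);               /* fd -> dma_buf reference */
        if (IS_ERR(dmabuf))
            return PTR_ERR(dmabuf);

        attach = dma_buf_attach(dmabuf, dev);   /* may fail, e.g. with -EBUSY */
        if (IS_ERR(attach)) {
            ret = PTR_ERR(attach);
            goto put;
        }

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
            ret = PTR_ERR(sgt);
            goto detach;
        }

        /* ... device DMA using sgt ... */

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
    detach:
        dma_buf_detach(dmabuf, attach);
    put:
        dma_buf_put(dmabuf);
        return ret;
    }

The attach/detach and map/unmap pairing is what lets the exporter defer
allocation or migrate the backing storage, which is the point the
documentation below makes in detail.
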
Cc: linux-doc@vger.kernel.org Cc: Jonathan Corbet Cc: Sumit Semwal Signed-off-by: Daniel Vetter Signed-off-by: Sumit Semwal Link: http://patchwork.freedesktop.org/patch/msgid/20161209185309.1682-4-daniel.vetter@ffwll.ch --- Documentation/dma-buf-sharing.txt | 222 ----------------------------------- Documentation/driver-api/dma-buf.rst | 38 ++++++ drivers/dma-buf/dma-buf.c | 64 +++++++++- drivers/dma-buf/sync_file.c | 1 - include/linux/dma-buf.h | 133 +++++++++++++++++---- 5 files changed, 207 insertions(+), 251 deletions(-) (limited to 'drivers') diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt index ca44c5820585..dca2fb7ac3b4 100644 --- a/Documentation/dma-buf-sharing.txt +++ b/Documentation/dma-buf-sharing.txt @@ -5,228 +5,6 @@ -This document serves as a guide to device-driver writers on what is the dma-buf -buffer sharing API, how to use it for exporting and using shared buffers. - -Any device driver which wishes to be a part of DMA buffer sharing, can do so as -either the 'exporter' of buffers, or the 'user' of buffers. - -Say a driver A wants to use buffers created by driver B, then we call B as the -exporter, and A as buffer-user. - -The exporter -- implements and manages operations[1] for the buffer -- allows other users to share the buffer by using dma_buf sharing APIs, -- manages the details of buffer allocation, -- decides about the actual backing storage where this allocation happens, -- takes care of any migration of scatterlist - for all (shared) users of this - buffer, - -The buffer-user -- is one of (many) sharing users of the buffer. -- doesn't need to worry about how the buffer is allocated, or where. -- needs a mechanism to get access to the scatterlist that makes up this buffer - in memory, mapped into its own address space, so it can access the same area - of memory. - -dma-buf operations for device dma only --------------------------------------- - -The dma_buf buffer sharing API usage contains the following steps: - -1. Exporter announces that it wishes to export a buffer -2. Userspace gets the file descriptor associated with the exported buffer, and - passes it around to potential buffer-users based on use case -3. Each buffer-user 'connects' itself to the buffer -4. When needed, buffer-user requests access to the buffer from exporter -5. When finished with its use, the buffer-user notifies end-of-DMA to exporter -6. when buffer-user is done using this buffer completely, it 'disconnects' - itself from the buffer. - - -1. Exporter's announcement of buffer export - - The buffer exporter announces its wish to export a buffer. In this, it - connects its own private buffer data, provides implementation for operations - that can be performed on the exported dma_buf, and flags for the file - associated with this buffer. All these fields are filled in struct - dma_buf_export_info, defined via the DEFINE_DMA_BUF_EXPORT_INFO macro. - - Interface: - DEFINE_DMA_BUF_EXPORT_INFO(exp_info) - struct dma_buf *dma_buf_export(struct dma_buf_export_info *exp_info) - - If this succeeds, dma_buf_export allocates a dma_buf structure, and - returns a pointer to the same. It also associates an anonymous file with this - buffer, so it can be exported. On failure to allocate the dma_buf object, - it returns NULL. - - 'exp_name' in struct dma_buf_export_info is the name of exporter - to - facilitate information while debugging. It is set to KBUILD_MODNAME by - default, so exporters don't have to provide a specific name, if they don't - wish to. 
- - DEFINE_DMA_BUF_EXPORT_INFO macro defines the struct dma_buf_export_info, - zeroes it out and pre-populates exp_name in it. - - -2. Userspace gets a handle to pass around to potential buffer-users - - Userspace entity requests for a file-descriptor (fd) which is a handle to the - anonymous file associated with the buffer. It can then share the fd with other - drivers and/or processes. - - Interface: - int dma_buf_fd(struct dma_buf *dmabuf, int flags) - - This API installs an fd for the anonymous file associated with this buffer; - returns either 'fd', or error. - -3. Each buffer-user 'connects' itself to the buffer - - Each buffer-user now gets a reference to the buffer, using the fd passed to - it. - - Interface: - struct dma_buf *dma_buf_get(int fd) - - This API will return a reference to the dma_buf, and increment refcount for - it. - - After this, the buffer-user needs to attach its device with the buffer, which - helps the exporter to know of device buffer constraints. - - Interface: - struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, - struct device *dev) - - This API returns reference to an attachment structure, which is then used - for scatterlist operations. It will optionally call the 'attach' dma_buf - operation, if provided by the exporter. - - The dma-buf sharing framework does the bookkeeping bits related to managing - the list of all attachments to a buffer. - -Until this stage, the buffer-exporter has the option to choose not to actually -allocate the backing storage for this buffer, but wait for the first buffer-user -to request use of buffer for allocation. - - -4. When needed, buffer-user requests access to the buffer - - Whenever a buffer-user wants to use the buffer for any DMA, it asks for - access to the buffer using dma_buf_map_attachment API. At least one attach to - the buffer must have happened before map_dma_buf can be called. - - Interface: - struct sg_table * dma_buf_map_attachment(struct dma_buf_attachment *, - enum dma_data_direction); - - This is a wrapper to dma_buf->ops->map_dma_buf operation, which hides the - "dma_buf->ops->" indirection from the users of this interface. - - In struct dma_buf_ops, map_dma_buf is defined as - struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, - enum dma_data_direction); - - It is one of the buffer operations that must be implemented by the exporter. - It should return the sg_table containing scatterlist for this buffer, mapped - into caller's address space. - - If this is being called for the first time, the exporter can now choose to - scan through the list of attachments for this buffer, collate the requirements - of the attached devices, and choose an appropriate backing storage for the - buffer. - - Based on enum dma_data_direction, it might be possible to have multiple users - accessing at the same time (for reading, maybe), or any other kind of sharing - that the exporter might wish to make available to buffer-users. - - map_dma_buf() operation can return -EINTR if it is interrupted by a signal. - - -5. When finished, the buffer-user notifies end-of-DMA to exporter - - Once the DMA for the current buffer-user is over, it signals 'end-of-DMA' to - the exporter using the dma_buf_unmap_attachment API. - - Interface: - void dma_buf_unmap_attachment(struct dma_buf_attachment *, - struct sg_table *); - - This is a wrapper to dma_buf->ops->unmap_dma_buf() operation, which hides the - "dma_buf->ops->" indirection from the users of this interface. 
- - In struct dma_buf_ops, unmap_dma_buf is defined as - void (*unmap_dma_buf)(struct dma_buf_attachment *, - struct sg_table *, - enum dma_data_direction); - - unmap_dma_buf signifies the end-of-DMA for the attachment provided. Like - map_dma_buf, this API also must be implemented by the exporter. - - -6. when buffer-user is done using this buffer, it 'disconnects' itself from the - buffer. - - After the buffer-user has no more interest in using this buffer, it should - disconnect itself from the buffer: - - - it first detaches itself from the buffer. - - Interface: - void dma_buf_detach(struct dma_buf *dmabuf, - struct dma_buf_attachment *dmabuf_attach); - - This API removes the attachment from the list in dmabuf, and optionally calls - dma_buf->ops->detach(), if provided by exporter, for any housekeeping bits. - - - Then, the buffer-user returns the buffer reference to exporter. - - Interface: - void dma_buf_put(struct dma_buf *dmabuf); - - This API then reduces the refcount for this buffer. - - If, as a result of this call, the refcount becomes 0, the 'release' file - operation related to this fd is called. It calls the dmabuf->ops->release() - operation in turn, and frees the memory allocated for dmabuf when exported. - -NOTES: -- Importance of attach-detach and {map,unmap}_dma_buf operation pairs - The attach-detach calls allow the exporter to figure out backing-storage - constraints for the currently-interested devices. This allows preferential - allocation, and/or migration of pages across different types of storage - available, if possible. - - Bracketing of DMA access with {map,unmap}_dma_buf operations is essential - to allow just-in-time backing of storage, and migration mid-way through a - use-case. - -- Migration of backing storage if needed - If after - - at least one map_dma_buf has happened, - - and the backing storage has been allocated for this buffer, - another new buffer-user intends to attach itself to this buffer, it might - be allowed, if possible for the exporter. - - In case it is allowed by the exporter: - if the new buffer-user has stricter 'backing-storage constraints', and the - exporter can handle these constraints, the exporter can just stall on the - map_dma_buf until all outstanding access is completed (as signalled by - unmap_dma_buf). - Once all users have finished accessing and have unmapped this buffer, the - exporter could potentially move the buffer to the stricter backing-storage, - and then allow further {map,unmap}_dma_buf operations from any buffer-user - from the migrated backing-storage. - - If the exporter cannot fulfill the backing-storage constraints of the new - buffer-user device as requested, dma_buf_attach() would return an error to - denote non-compatibility of the new buffer-sharing request with the current - buffer. - - If the exporter chooses not to allow an attach() operation once a - map_dma_buf() API has been called, it simply returns an error. Kernel cpu access to a dma-buf buffer object -------------------------------------------- diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst index a9b457a4b949..906d1532efad 100644 --- a/Documentation/driver-api/dma-buf.rst +++ b/Documentation/driver-api/dma-buf.rst @@ -17,6 +17,44 @@ shared or exclusive fence(s) associated with the buffer. Shared DMA Buffers ------------------ +This document serves as a guide to device-driver writers on what is the dma-buf +buffer sharing API, how to use it for exporting and using shared buffers. 
+ +Any device driver which wishes to be a part of DMA buffer sharing, can do so as +either the 'exporter' of buffers, or the 'user' or 'importer' of buffers. + +Say a driver A wants to use buffers created by driver B, then we call B as the +exporter, and A as buffer-user/importer. + +The exporter + + - implements and manages operations in :c:type:`struct dma_buf_ops + ` for the buffer, + - allows other users to share the buffer by using dma_buf sharing APIs, + - manages the details of buffer allocation, wrapped int a :c:type:`struct + dma_buf `, + - decides about the actual backing storage where this allocation happens, + - and takes care of any migration of scatterlist - for all (shared) users of + this buffer. + +The buffer-user + + - is one of (many) sharing users of the buffer. + - doesn't need to worry about how the buffer is allocated, or where. + - and needs a mechanism to get access to the scatterlist that makes up this + buffer in memory, mapped into its own address space, so it can access the + same area of memory. This interface is provided by :c:type:`struct + dma_buf_attachment `. + +Basic Operation and Device DMA Access +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: drivers/dma-buf/dma-buf.c + :doc: dma buf device access + +Kernel Functions and Structures Reference +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + .. kernel-doc:: drivers/dma-buf/dma-buf.c :export: diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index e72e64484131..09f948fd62ad 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -313,6 +313,37 @@ static inline int is_dma_buf_file(struct file *file) return file->f_op == &dma_buf_fops; } +/** + * DOC: dma buf device access + * + * For device DMA access to a shared DMA buffer the usual sequence of operations + * is fairly simple: + * + * 1. The exporter defines his exporter instance using + * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private + * buffer object into a &dma_buf. It then exports that &dma_buf to userspace + * as a file descriptor by calling dma_buf_fd(). + * + * 2. Userspace passes this file-descriptors to all drivers it wants this buffer + * to share with: First the filedescriptor is converted to a &dma_buf using + * dma_buf_get(). The the buffer is attached to the device using + * dma_buf_attach(). + * + * Up to this stage the exporter is still free to migrate or reallocate the + * backing storage. + * + * 3. Once the buffer is attached to all devices userspace can inniate DMA + * access to the shared buffer. In the kernel this is done by calling + * dma_buf_map_attachment() and dma_buf_unmap_attachment(). + * + * 4. Once a driver is done with a shared buffer it needs to call + * dma_buf_detach() (after cleaning up any mappings) and then release the + * reference acquired with dma_buf_get by calling dma_buf_put(). + * + * For the detailed semantics exporters are expected to implement see + * &dma_buf_ops. + */ + /** * dma_buf_export - Creates a new dma_buf, and associates an anon file * with this buffer, so it can be exported. @@ -320,13 +351,15 @@ static inline int is_dma_buf_file(struct file *file) * Additionally, provide a name string for exporter; useful in debugging. * * @exp_info: [in] holds all the export related information provided - * by the exporter. see struct dma_buf_export_info + * by the exporter. see struct &dma_buf_export_info * for further details. 
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On either missing
 * ops, or error in allocating struct dma_buf, will return negative error.
 *
+ * For most cases the easiest way to create @exp_info is through the
+ * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
@@ -458,7 +491,12 @@ EXPORT_SYMBOL_GPL(dma_buf_get);
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf: [in] buffer to reduce refcount of
 *
- * Uses file's refcounting done implicitly by fput()
+ * Uses file's refcounting done implicitly by fput().
+ *
+ * If, as a result of this call, the refcount becomes 0, the 'release' file
+ * operation related to this fd is called. It calls the release operation of
+ * struct &dma_buf_ops in turn, and frees the memory allocated for dmabuf when
+ * exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
@@ -475,8 +513,17 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
 * @dmabuf: [in] buffer to attach device to.
 * @dev: [in] device to be attached.
 *
- * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
- * error.
+ * Returns struct dma_buf_attachment pointer for this attachment. Attachments
+ * must be cleaned up by calling dma_buf_detach().
+ *
+ * Returns:
+ *
+ * A pointer to newly created &dma_buf_attachment on success, or a negative
+ * error code wrapped into a pointer on failure.
+ *
+ * Note that this can fail if the backing storage of @dmabuf is in a place not
+ * accessible to @dev, and cannot be moved to a more suitable place. This is
+ * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 struct device *dev)
@@ -519,6 +566,7 @@ EXPORT_SYMBOL_GPL(dma_buf_attach);
 * @dmabuf: [in] buffer to detach from.
 * @attach: [in] attachment to be detached; is free'd after this call.
 *
+ * Clean up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
@@ -543,7 +591,12 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
 * @direction: [in] direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
- * on error.
+ * on error. May return -EINTR if it is interrupted by a signal.
+ *
+ * A mapping must be unmapped again using dma_buf_unmap_attachment(). Note that
+ * the underlying backing storage is pinned for as long as a mapping exists,
+ * therefore users/importers should not hold onto a mapping for undue amounts of
+ * time.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 enum dma_data_direction direction)
@@ -571,6 +624,7 @@ EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
 * @sg_table: [in] scatterlist info of the buffer to unmap
 * @direction: [in] direction of DMA transfer
 *
+ * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
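(Steps 2-4 on the importer's side can be sketched as below; struct my_importer and the my_* function names are hypothetical, and error handling is reduced to the essentials.)

	struct my_importer {
		struct device *dev;
		struct dma_buf *dmabuf;
		struct dma_buf_attachment *attach;
		struct sg_table *sgt;
	};

	static int my_import(struct my_importer *imp, int fd)
	{
		imp->dmabuf = dma_buf_get(fd);		/* fd -> dma_buf reference */
		if (IS_ERR(imp->dmabuf))
			return PTR_ERR(imp->dmabuf);

		imp->attach = dma_buf_attach(imp->dmabuf, imp->dev);
		if (IS_ERR(imp->attach)) {
			dma_buf_put(imp->dmabuf);
			return PTR_ERR(imp->attach);
		}

		/* pins the backing storage and maps it for imp->dev */
		imp->sgt = dma_buf_map_attachment(imp->attach, DMA_BIDIRECTIONAL);
		if (IS_ERR(imp->sgt)) {
			dma_buf_detach(imp->dmabuf, imp->attach);
			dma_buf_put(imp->dmabuf);
			return PTR_ERR(imp->sgt);
		}
		return 0;
	}

	static void my_import_end(struct my_importer *imp)
	{
		/* tear down in reverse order once DMA has completed */
		dma_buf_unmap_attachment(imp->attach, imp->sgt, DMA_BIDIRECTIONAL);
		dma_buf_detach(imp->dmabuf, imp->attach);
		dma_buf_put(imp->dmabuf);
	}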
*/ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, struct sg_table *sg_table, diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index d5179d7e8575..07cb9b908f30 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -462,4 +462,3 @@ static const struct file_operations sync_file_fops = { .unlocked_ioctl = sync_file_ioctl, .compat_ioctl = sync_file_ioctl, }; - diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 8daeb3ce0016..6df170fb243f 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -39,19 +39,6 @@ struct dma_buf_attachment; /** * struct dma_buf_ops - operations possible on struct dma_buf - * @attach: [optional] allows different devices to 'attach' themselves to the - * given buffer. It might return -EBUSY to signal that backing storage - * is already allocated and incompatible with the requirements - * of requesting device. - * @detach: [optional] detach a given device from this buffer. - * @map_dma_buf: returns list of scatter pages allocated, increases usecount - * of the buffer. Requires atleast one attach to be called - * before. Returned sg list should already be mapped into - * _device_ address space. This call may sleep. May also return - * -EINTR. Should return -EINVAL if attach hasn't been called yet. - * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter - * pages. - * @release: release this buffer; to be called after the last dma_buf_put. * @begin_cpu_access: [optional] called before cpu access to invalidate cpu * caches and allocate backing storage (if not yet done) * respectively pin the object into memory. @@ -72,25 +59,109 @@ struct dma_buf_attachment; * @vunmap: [optional] unmaps a vmap from the buffer */ struct dma_buf_ops { + /** + * @attach: + * + * This is called from dma_buf_attach() to make sure that a given + * &device can access the provided &dma_buf. Exporters which support + * buffer objects in special locations like VRAM or device-specific + * carveout areas should check whether the buffer could be move to + * system memory (or directly accessed by the provided device), and + * otherwise need to fail the attach operation. + * + * The exporter should also in general check whether the current + * allocation fullfills the DMA constraints of the new device. If this + * is not the case, and the allocation cannot be moved, it should also + * fail the attach operation. + * + * Any exporter-private housekeeping data can be stored in the priv + * pointer of &dma_buf_attachment structure. + * + * This callback is optional. + * + * Returns: + * + * 0 on success, negative error code on failure. It might return -EBUSY + * to signal that backing storage is already allocated and incompatible + * with the requirements of requesting device. + */ int (*attach)(struct dma_buf *, struct device *, - struct dma_buf_attachment *); + struct dma_buf_attachment *); + /** + * @detach: + * + * This is called by dma_buf_detach() to release a &dma_buf_attachment. + * Provided so that exporters can clean up any housekeeping for an + * &dma_buf_attachment. + * + * This callback is optional. + */ void (*detach)(struct dma_buf *, struct dma_buf_attachment *); - /* For {map,unmap}_dma_buf below, any specific buffer attributes - * required should get added to device_dma_parameters accessible - * via dev->dma_params. + /** + * @map_dma_buf: + * + * This is called by dma_buf_map_attachment() and is used to map a + * shared &dma_buf into device address space, and it is mandatory. 
It + * can only be called if @attach has been called successfully. This + * essentially pins the DMA buffer into place, and it cannot be moved + * any more + * + * This call may sleep, e.g. when the backing storage first needs to be + * allocated, or moved to a location suitable for all currently attached + * devices. + * + * Note that any specific buffer attributes required for this function + * should get added to device_dma_parameters accessible via + * device->dma_params from the &dma_buf_attachment. The @attach callback + * should also check these constraints. + * + * If this is being called for the first time, the exporter can now + * choose to scan through the list of attachments for this buffer, + * collate the requirements of the attached devices, and choose an + * appropriate backing storage for the buffer. + * + * Based on enum dma_data_direction, it might be possible to have + * multiple users accessing at the same time (for reading, maybe), or + * any other kind of sharing that the exporter might wish to make + * available to buffer-users. + * + * Returns: + * + * A &sg_table scatter list of or the backing storage of the DMA buffer, + * already mapped into the device address space of the &device attached + * with the provided &dma_buf_attachment. + * + * On failure, returns a negative error value wrapped into a pointer. + * May also return -EINTR when a signal was received while being + * blocked. */ struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, - enum dma_data_direction); + enum dma_data_direction); + /** + * @unmap_dma_buf: + * + * This is called by dma_buf_unmap_attachment() and should unmap and + * release the &sg_table allocated in @map_dma_buf, and it is mandatory. + * It should also unpin the backing storage if this is the last mapping + * of the DMA buffer, it the exporter supports backing storage + * migration. + */ void (*unmap_dma_buf)(struct dma_buf_attachment *, - struct sg_table *, - enum dma_data_direction); + struct sg_table *, + enum dma_data_direction); + /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY * if the call would block. */ - /* after final dma_buf_put() */ + /** + * @release: + * + * Called after the last dma_buf_put to release the &dma_buf, and + * mandatory. + */ void (*release)(struct dma_buf *); int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); @@ -124,6 +195,15 @@ struct dma_buf_ops { * @poll: for userspace poll support * @cb_excl: for userspace poll support * @cb_shared: for userspace poll support + * + * This represents a shared buffer, created by calling dma_buf_export(). The + * userspace representation is a normal file descriptor, which can be created by + * calling dma_buf_fd(). + * + * Shared dma buffers are reference counted using dma_buf_put() and + * get_dma_buf(). + * + * Device DMA access is handled by the separate struct &dma_buf_attachment. */ struct dma_buf { size_t size; @@ -160,6 +240,11 @@ struct dma_buf { * This structure holds the attachment information between the dma_buf buffer * and its user device(s). The list contains one attachment struct per device * attached to the buffer. + * + * An attachment is created by calling dma_buf_attach(), and released again by + * calling dma_buf_detach(). The DMA mapping itself needed to initiate a + * transfer is created by dma_buf_map_attachment() and freed again by calling + * dma_buf_unmap_attachment(). 
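(The exporter-side counterpart of the callbacks documented above can be wired up roughly as in this skeleton. struct my_buffer and the my_* functions are hypothetical, and the sg_table handling is heavily simplified - a real exporter needs locking, per-attachment state and the CPU access hooks (@kmap and friends) as well.)

	struct my_buffer {
		struct sg_table *sgt;	/* backing pages, owned by the exporter */
	};

	static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
					       enum dma_data_direction dir)
	{
		struct my_buffer *buf = attach->dmabuf->priv;
		struct sg_table *sgt = buf->sgt;

		/* map the exporter's scatterlist for the importing device */
		sgt->nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!sgt->nents)
			return ERR_PTR(-ENOMEM);

		return sgt;
	}

	static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
				     struct sg_table *sgt,
				     enum dma_data_direction dir)
	{
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
	}

	static void my_buf_release(struct dma_buf *dmabuf)
	{
		/* called after the last dma_buf_put(): free dmabuf->priv here */
	}

	static const struct dma_buf_ops my_dmabuf_ops = {
		/* @attach and @detach are optional and omitted in this sketch */
		.map_dma_buf	= my_map_dma_buf,
		.unmap_dma_buf	= my_unmap_dma_buf,
		.release	= my_buf_release,
	};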
*/ struct dma_buf_attachment { struct dma_buf *dmabuf; @@ -192,9 +277,11 @@ struct dma_buf_export_info { }; /** - * helper macro for exporters; zeros and fills in most common values - * + * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters * @name: export-info name + * + * DEFINE_DMA_BUF_EXPORT_INFO macro defines the struct &dma_buf_export_info, + * zeroes it out and pre-populates exp_name in it. */ #define DEFINE_DMA_BUF_EXPORT_INFO(name) \ struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \ -- cgit v1.2.3 From 0959a1683d78270bab6381d498707fb8655ae11c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 9 Dec 2016 19:53:08 +0100 Subject: dma-buf: Update cpu access documentation - Again move the information relevant for driver writers next to the callbacks. - Put the overview and userspace interface documentation into a DOC: section within the code. - Remove the text that mmap needs to be coherent - since the DMA_BUF_IOCTL_SYNC landed that's no longer the case. But keep the text that for pte zapping exporters need to adjust the address space. - Add a FIXME that kmap and the new begin/end stuff used by the SYNC ioctl don't really mix correctly. That's something I just realized while doing this doc rework. - Augment function and structure docs like usual. Cc: linux-doc@vger.kernel.org Cc: Jonathan Corbet Cc: Sumit Semwal Signed-off-by: Daniel Vetter Signed-off-by: Sumit Semwal [sumits: fix cosmetic issues] Link: http://patchwork.freedesktop.org/patch/msgid/20161209185309.1682-5-daniel.vetter@ffwll.ch --- Documentation/dma-buf-sharing.txt | 213 ----------------------------------- Documentation/driver-api/dma-buf.rst | 6 + drivers/dma-buf/dma-buf.c | 122 ++++++++++++++++++++ include/linux/dma-buf.h | 91 +++++++++++++-- 4 files changed, 211 insertions(+), 221 deletions(-) (limited to 'drivers') diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt index dca2fb7ac3b4..74c99edb7976 100644 --- a/Documentation/dma-buf-sharing.txt +++ b/Documentation/dma-buf-sharing.txt @@ -6,205 +6,6 @@ -Kernel cpu access to a dma-buf buffer object --------------------------------------------- - -The motivation to allow cpu access from the kernel to a dma-buf object from the -importers side are: -- fallback operations, e.g. if the devices is connected to a usb bus and the - kernel needs to shuffle the data around first before sending it away. -- full transparency for existing users on the importer side, i.e. userspace - should not notice the difference between a normal object from that subsystem - and an imported one backed by a dma-buf. This is really important for drm - opengl drivers that expect to still use all the existing upload/download - paths. - -Access to a dma_buf from the kernel context involves three steps: - -1. Prepare access, which invalidate any necessary caches and make the object - available for cpu access. -2. Access the object page-by-page with the dma_buf map apis -3. Finish access, which will flush any necessary cpu caches and free reserved - resources. - -1. Prepare access - - Before an importer can access a dma_buf object with the cpu from the kernel - context, it needs to notify the exporter of the access that is about to - happen. - - Interface: - int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction) - - This allows the exporter to ensure that the memory is actually available for - cpu access - the exporter might need to allocate or swap-in and pin the - backing storage. 
The exporter also needs to ensure that cpu access is - coherent for the access direction. The direction can be used by the exporter - to optimize the cache flushing, i.e. access with a different direction (read - instead of write) might return stale or even bogus data (e.g. when the - exporter needs to copy the data to temporary storage). - - This step might fail, e.g. in oom conditions. - -2. Accessing the buffer - - To support dma_buf objects residing in highmem cpu access is page-based using - an api similar to kmap. Accessing a dma_buf is done in aligned chunks of - PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which returns - a pointer in kernel virtual address space. Afterwards the chunk needs to be - unmapped again. There is no limit on how often a given chunk can be mapped - and unmapped, i.e. the importer does not need to call begin_cpu_access again - before mapping the same chunk again. - - Interfaces: - void *dma_buf_kmap(struct dma_buf *, unsigned long); - void dma_buf_kunmap(struct dma_buf *, unsigned long, void *); - - There are also atomic variants of these interfaces. Like for kmap they - facilitate non-blocking fast-paths. Neither the importer nor the exporter (in - the callback) is allowed to block when using these. - - Interfaces: - void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long); - void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); - - For importers all the restrictions of using kmap apply, like the limited - supply of kmap_atomic slots. Hence an importer shall only hold onto at most 2 - atomic dma_buf kmaps at the same time (in any given process context). - - dma_buf kmap calls outside of the range specified in begin_cpu_access are - undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on - the partial chunks at the beginning and end but may return stale or bogus - data outside of the range (in these partial chunks). - - Note that these calls need to always succeed. The exporter needs to complete - any preparations that might fail in begin_cpu_access. - - For some cases the overhead of kmap can be too high, a vmap interface - is introduced. This interface should be used very carefully, as vmalloc - space is a limited resources on many architectures. - - Interfaces: - void *dma_buf_vmap(struct dma_buf *dmabuf) - void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) - - The vmap call can fail if there is no vmap support in the exporter, or if it - runs out of vmalloc space. Fallback to kmap should be implemented. Note that - the dma-buf layer keeps a reference count for all vmap access and calls down - into the exporter's vmap function only when no vmapping exists, and only - unmaps it once. Protection against concurrent vmap/vunmap calls is provided - by taking the dma_buf->lock mutex. - -3. Finish access - - When the importer is done accessing the CPU, it needs to announce this to - the exporter (to facilitate cache flushing and unpinning of any pinned - resources). The result of any dma_buf kmap calls after end_cpu_access is - undefined. - - Interface: - void dma_buf_end_cpu_access(struct dma_buf *dma_buf, - enum dma_data_direction dir); - - -Direct Userspace Access/mmap Support ------------------------------------- - -Being able to mmap an export dma-buf buffer object has 2 main use-cases: -- CPU fallback processing in a pipeline and -- supporting existing mmap interfaces in importers. - -1. 
CPU fallback processing in a pipeline - - In many processing pipelines it is sometimes required that the cpu can access - the data in a dma-buf (e.g. for thumbnail creation, snapshots, ...). To avoid - the need to handle this specially in userspace frameworks for buffer sharing - it's ideal if the dma_buf fd itself can be used to access the backing storage - from userspace using mmap. - - Furthermore Android's ION framework already supports this (and is otherwise - rather similar to dma-buf from a userspace consumer side with using fds as - handles, too). So it's beneficial to support this in a similar fashion on - dma-buf to have a good transition path for existing Android userspace. - - No special interfaces, userspace simply calls mmap on the dma-buf fd, making - sure that the cache synchronization ioctl (DMA_BUF_IOCTL_SYNC) is *always* - used when the access happens. Note that DMA_BUF_IOCTL_SYNC can fail with - -EAGAIN or -EINTR, in which case it must be restarted. - - Some systems might need some sort of cache coherency management e.g. when - CPU and GPU domains are being accessed through dma-buf at the same time. To - circumvent this problem there are begin/end coherency markers, that forward - directly to existing dma-buf device drivers vfunc hooks. Userspace can make - use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The sequence - would be used like following: - - mmap dma-buf fd - - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write - to mmap area 3. SYNC_END ioctl. This can be repeated as often as you - want (with the new data being consumed by the GPU or say scanout device) - - munmap once you don't need the buffer any more - - For correctness and optimal performance, it is always required to use - SYNC_START and SYNC_END before and after, respectively, when accessing the - mapped address. Userspace cannot rely on coherent access, even when there - are systems where it just works without calling these ioctls. - -2. Supporting existing mmap interfaces in importers - - Similar to the motivation for kernel cpu access it is again important that - the userspace code of a given importing subsystem can use the same interfaces - with a imported dma-buf buffer object as with a native buffer object. This is - especially important for drm where the userspace part of contemporary OpenGL, - X, and other drivers is huge, and reworking them to use a different way to - mmap a buffer rather invasive. - - The assumption in the current dma-buf interfaces is that redirecting the - initial mmap is all that's needed. A survey of some of the existing - subsystems shows that no driver seems to do any nefarious thing like syncing - up with outstanding asynchronous processing on the device or allocating - special resources at fault time. So hopefully this is good enough, since - adding interfaces to intercept pagefaults and allow pte shootdowns would - increase the complexity quite a bit. - - Interface: - int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, - unsigned long); - - If the importing subsystem simply provides a special-purpose mmap call to set - up a mapping in userspace, calling do_mmap with dma_buf->file will equally - achieve that for a dma-buf object. - -3. Implementation notes for exporters - - Because dma-buf buffers have invariant size over their lifetime, the dma-buf - core checks whether a vma is too large and rejects such mappings. The - exporter hence does not need to duplicate this check. 
- - Because existing importing subsystems might presume coherent mappings for - userspace, the exporter needs to set up a coherent mapping. If that's not - possible, it needs to fake coherency by manually shooting down ptes when - leaving the cpu domain and flushing caches at fault time. Note that all the - dma_buf files share the same anon inode, hence the exporter needs to replace - the dma_buf file stored in vma->vm_file with it's own if pte shootdown is - required. This is because the kernel uses the underlying inode's address_space - for vma tracking (and hence pte tracking at shootdown time with - unmap_mapping_range). - - If the above shootdown dance turns out to be too expensive in certain - scenarios, we can extend dma-buf with a more explicit cache tracking scheme - for userspace mappings. But the current assumption is that using mmap is - always a slower path, so some inefficiencies should be acceptable. - - Exporters that shoot down mappings (for any reasons) shall not do any - synchronization at fault time with outstanding device operations. - Synchronization is an orthogonal issue to sharing the backing storage of a - buffer and hence should not be handled by dma-buf itself. This is explicitly - mentioned here because many people seem to want something like this, but if - different exporters handle this differently, buffer sharing can fail in - interesting ways depending upong the exporter (if userspace starts depending - upon this implicit synchronization). - Other Interfaces Exposed to Userspace on the dma-buf FD ------------------------------------------------------ @@ -240,20 +41,6 @@ Miscellaneous notes the exporting driver to create a dmabuf fd must provide a way to let userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd(). -- If an exporter needs to manually flush caches and hence needs to fake - coherency for mmap support, it needs to be able to zap all the ptes pointing - at the backing storage. Now linux mm needs a struct address_space associated - with the struct file stored in vma->vm_file to do that with the function - unmap_mapping_range. But the dma_buf framework only backs every dma_buf fd - with the anon_file struct file, i.e. all dma_bufs share the same file. - - Hence exporters need to setup their own file (and address_space) association - by setting vma->vm_file and adjusting vma->vm_pgoff in the dma_buf mmap - callback. In the specific case of a gem driver the exporter could use the - shmem file already provided by gem (and set vm_pgoff = 0). Exporters can then - zap ptes by unmapping the corresponding range of the struct address_space - associated with their own file. - References: [1] struct dma_buf_ops in include/linux/dma-buf.h [2] All interfaces mentioned above defined in include/linux/dma-buf.h diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst index 906d1532efad..92e417035e16 100644 --- a/Documentation/driver-api/dma-buf.rst +++ b/Documentation/driver-api/dma-buf.rst @@ -52,6 +52,12 @@ Basic Operation and Device DMA Access .. kernel-doc:: drivers/dma-buf/dma-buf.c :doc: dma buf device access +CPU Access to DMA Buffer Objects +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
kernel-doc:: drivers/dma-buf/dma-buf.c
+ :doc: cpu access
+
 Kernel Functions and Structures Reference
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 09f948fd62ad..eae0846cbd95 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -640,6 +640,122 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
+/**
+ * DOC: cpu access
+ *
+ * There are multiple reasons for supporting CPU access to a dma buffer object:
+ *
+ * - Fallback operations in the kernel, for example when a device is connected
+ * over USB and the kernel needs to shuffle the data around first before
+ * sending it away. Cache coherency is handled by bracketing any transactions
+ * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
+ *
+ * To support dma_buf objects residing in highmem cpu access is page-based
+ * using an api similar to kmap. Accessing a dma_buf is done in aligned chunks
+ * of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
+ * returns a pointer in kernel virtual address space. Afterwards the chunk
+ * needs to be unmapped again. There is no limit on how often a given chunk
+ * can be mapped and unmapped, i.e. the importer does not need to call
+ * begin_cpu_access again before mapping the same chunk again.
+ *
+ * Interfaces::
+ * void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
+ * void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
+ *
+ * There are also atomic variants of these interfaces. Like for kmap they
+ * facilitate non-blocking fast-paths. Neither the importer nor the exporter
+ * (in the callback) is allowed to block when using these.
+ *
+ * Interfaces::
+ * void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
+ * void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
+ *
+ * For importers all the restrictions of using kmap apply, like the limited
+ * supply of kmap_atomic slots. Hence an importer shall only hold onto at
+ * most 2 atomic dma_buf kmaps at the same time (in any given process context).
+ *
+ * dma_buf kmap calls outside of the range specified in begin_cpu_access are
+ * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
+ * the partial chunks at the beginning and end but may return stale or bogus
+ * data outside of the range (in these partial chunks).
+ *
+ * Note that these calls need to always succeed. The exporter needs to
+ * complete any preparations that might fail in begin_cpu_access.
+ *
+ * For some cases the overhead of kmap can be too high, a vmap interface
+ * is introduced. This interface should be used very carefully, as vmalloc
+ * space is a limited resource on many architectures.
+ *
+ * Interfaces::
+ * void \*dma_buf_vmap(struct dma_buf \*dmabuf)
+ * void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
+ *
+ * The vmap call can fail if there is no vmap support in the exporter, or if
+ * it runs out of vmalloc space. Fallback to kmap should be implemented. Note
+ * that the dma-buf layer keeps a reference count for all vmap access and
+ * calls down into the exporter's vmap function only when no vmapping exists,
+ * and only unmaps it once. Protection against concurrent vmap/vunmap calls is
+ * provided by taking the dma_buf->lock mutex.
+ *
+ * - For full compatibility on the importer side with existing userspace
+ * interfaces, which might already support mmap'ing buffers. This is needed in
+ * many processing pipelines (e.g. feeding a software rendered image into a
+ * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
+ * framework already supported this, and mmap support was needed for DMA buffer
+ * file descriptors to replace ION buffers.
+ *
+ * There are no special interfaces, userspace simply calls mmap on the dma-buf
+ * fd. But like for CPU access there's a need to bracket the actual access,
+ * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
+ * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
+ * be restarted.
+ *
+ * Some systems might need some sort of cache coherency management e.g. when
+ * CPU and GPU domains are being accessed through dma-buf at the same time.
+ * To circumvent this problem there are begin/end coherency markers that
+ * forward directly to existing dma-buf device drivers vfunc hooks. Userspace
+ * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
+ * sequence would be used like the following:
+ *
+ * - mmap dma-buf fd
+ * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
+ * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
+ * want (with the new data being consumed by say the GPU or the scanout
+ * device)
+ * - munmap once you don't need the buffer any more
+ *
+ * For correctness and optimal performance, it is always required to use
+ * SYNC_START and SYNC_END before and after, respectively, when accessing the
+ * mapped address. Userspace cannot rely on coherent access, even when there
+ * are systems where it just works without calling these ioctls.
+ *
+ * - And as a CPU fallback in userspace processing pipelines.
+ *
+ * Similar to the motivation for kernel cpu access it is again important that
+ * the userspace code of a given importing subsystem can use the same
+ * interfaces with an imported dma-buf buffer object as with a native buffer
+ * object. This is especially important for drm where the userspace part of
+ * contemporary OpenGL, X, and other drivers is huge, and reworking them to
+ * use a different way to mmap a buffer is rather invasive.
+ *
+ * The assumption in the current dma-buf interfaces is that redirecting the
+ * initial mmap is all that's needed. A survey of some of the existing
+ * subsystems shows that no driver seems to do any nefarious thing like
+ * syncing up with outstanding asynchronous processing on the device or
+ * allocating special resources at fault time. So hopefully this is good
+ * enough, since adding interfaces to intercept pagefaults and allow pte
+ * shootdowns would increase the complexity quite a bit.
+ *
+ * Interface::
+ * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
+ * unsigned long);
+ *
+ * If the importing subsystem simply provides a special-purpose mmap call to
+ * set up a mapping in userspace, calling do_mmap with dma_buf->file will
+ * equally achieve that for a dma-buf object.
+ */
+
 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 enum dma_data_direction direction)
 {
@@ -665,6 +781,10 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 * @dmabuf: [in] buffer to prepare cpu access for.
 * @direction: [in] length of range for cpu access.
 *
+ * After the cpu access is complete the caller should call
+ * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
+ * it guaranteed to be coherent with other DMA access.
+ *
 * Can return negative error values, returns 0 on success.
*/ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, @@ -697,6 +817,8 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); * @dmabuf: [in] buffer to complete cpu access for. * @direction: [in] length of range for cpu access. * + * This terminates CPU access started with dma_buf_begin_cpu_access(). + * * Can return negative error values, returns 0 on success. */ int dma_buf_end_cpu_access(struct dma_buf *dmabuf, diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 6df170fb243f..57828154e440 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -39,10 +39,6 @@ struct dma_buf_attachment; /** * struct dma_buf_ops - operations possible on struct dma_buf - * @begin_cpu_access: [optional] called before cpu access to invalidate cpu - * caches and allocate backing storage (if not yet done) - * respectively pin the object into memory. - * @end_cpu_access: [optional] called after cpu access to flush caches. * @kmap_atomic: maps a page from the buffer into kernel address * space, users may not block until the subsequent unmap call. * This callback must not sleep. @@ -50,10 +46,6 @@ struct dma_buf_attachment; * This Callback must not sleep. * @kmap: maps a page from the buffer into kernel address space. * @kunmap: [optional] unmaps a page from the buffer. - * @mmap: used to expose the backing storage to userspace. Note that the - * mapping needs to be coherent - if the exporter doesn't directly - * support this, it needs to fake coherency by shooting down any ptes - * when transitioning away from the cpu domain. * @vmap: [optional] creates a virtual mapping for the buffer into kernel * address space. Same restrictions as for vmap and friends apply. * @vunmap: [optional] unmaps a vmap from the buffer @@ -164,13 +156,96 @@ struct dma_buf_ops { */ void (*release)(struct dma_buf *); + /** + * @begin_cpu_access: + * + * This is called from dma_buf_begin_cpu_access() and allows the + * exporter to ensure that the memory is actually available for cpu + * access - the exporter might need to allocate or swap-in and pin the + * backing storage. The exporter also needs to ensure that cpu access is + * coherent for the access direction. The direction can be used by the + * exporter to optimize the cache flushing, i.e. access with a different + * direction (read instead of write) might return stale or even bogus + * data (e.g. when the exporter needs to copy the data to temporary + * storage). + * + * This callback is optional. + * + * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command + * from userspace (where storage shouldn't be pinned to avoid handing + * de-factor mlock rights to userspace) and for the kernel-internal + * users of the various kmap interfaces, where the backing storage must + * be pinned to guarantee that the atomic kmap calls can succeed. Since + * there's no in-kernel users of the kmap interfaces yet this isn't a + * real problem. + * + * Returns: + * + * 0 on success or a negative error code on failure. This can for + * example fail when the backing storage can't be allocated. Can also + * return -ERESTARTSYS or -EINTR when the call has been interrupted and + * needs to be restarted. + */ int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + + /** + * @end_cpu_access: + * + * This is called from dma_buf_end_cpu_access() when the importer is + * done accessing the CPU. The exporter can use this to flush caches and + * unpin any resources pinned in @begin_cpu_access. 
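(A sketch of the kernel CPU-access bracketing described above, reading the first page of a shared buffer through the kmap interface; the function name and the destination buffer are illustrative only.)

	static int my_read_first_page(struct dma_buf *dmabuf, void *dst)
	{
		void *vaddr;
		int ret;

		ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
		if (ret)
			return ret;

		vaddr = dma_buf_kmap(dmabuf, 0);	/* page 0 of the buffer */
		memcpy(dst, vaddr, PAGE_SIZE);
		dma_buf_kunmap(dmabuf, 0, vaddr);

		return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
	}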
+ * The result of any dma_buf kmap calls after end_cpu_access is
+ * undefined.
+ *
+ * This callback is optional.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure. Can return
+ * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
+ * to be restarted.
+ */
 int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
 void *(*kmap_atomic)(struct dma_buf *, unsigned long);
 void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
 void *(*kmap)(struct dma_buf *, unsigned long);
 void (*kunmap)(struct dma_buf *, unsigned long, void *);
 
+ /**
+ * @mmap:
+ *
+ * This callback is used by the dma_buf_mmap() function.
+ *
+ * Note that the mapping does not need to be coherent; userspace is expected
+ * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
+ *
+ * Because dma-buf buffers have invariant size over their lifetime, the
+ * dma-buf core checks whether a vma is too large and rejects such
+ * mappings. The exporter hence does not need to duplicate this check.
+ * Drivers do not need to check this themselves.
+ *
+ * If an exporter needs to manually flush caches and hence needs to fake
+ * coherency for mmap support, it needs to be able to zap all the ptes
+ * pointing at the backing storage. Now linux mm needs a struct
+ * address_space associated with the struct file stored in vma->vm_file
+ * to do that with the function unmap_mapping_range. But the dma_buf
+ * framework only backs every dma_buf fd with the anon_file struct file,
+ * i.e. all dma_bufs share the same file.
+ *
+ * Hence exporters need to set up their own file (and address_space)
+ * association by setting vma->vm_file and adjusting vma->vm_pgoff in
+ * the dma_buf mmap callback. In the specific case of a gem driver the
+ * exporter could use the shmem file already provided by gem (and set
+ * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
+ * corresponding range of the struct address_space associated with their
+ * own file.
+ *
+ * This callback is optional.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure.
+ */
 int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
 
 void *(*vmap)(struct dma_buf *);
-- 
cgit v1.2.3 

From e7e21c72b178e963f3c990cb839d86f568999916 Mon Sep 17 00:00:00 2001
From: Daniel Vetter 
Date: Fri, 9 Dec 2016 22:50:55 +0100
Subject: dma-buf: Final bits of doc polish

- Put all the remaining bits of the old doc into suitable places in the
 new sphinx world.

- Also document the poll support, we forgot to do that.

- Delete dma-buf-sharing.txt.

v2: Don't forget to update MAINTAINERS.
Cc: linux-doc@vger.kernel.org Cc: Jonathan Corbet Cc: Sumit Semwal Signed-off-by: Daniel Vetter Signed-off-by: Sumit Semwal Link: http://patchwork.freedesktop.org/patch/msgid/20161209215055.3492-1-daniel.vetter@ffwll.ch --- Documentation/dma-buf-sharing.txt | 47 ----------------------------------- Documentation/driver-api/dma-buf.rst | 48 ++++++++++++++++++++++++++++++++++++ MAINTAINERS | 2 +- drivers/dma-buf/dma-buf.c | 22 +++++++++++++++++ 4 files changed, 71 insertions(+), 48 deletions(-) delete mode 100644 Documentation/dma-buf-sharing.txt (limited to 'drivers') diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt deleted file mode 100644 index 74c99edb7976..000000000000 --- a/Documentation/dma-buf-sharing.txt +++ /dev/null @@ -1,47 +0,0 @@ - DMA Buffer Sharing API Guide - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Sumit Semwal - - - - -Other Interfaces Exposed to Userspace on the dma-buf FD ------------------------------------------------------- - -- Since kernel 3.12 the dma-buf FD supports the llseek system call, but only - with offset=0 and whence=SEEK_END|SEEK_SET. SEEK_SET is supported to allow - the usual size discover pattern size = SEEK_END(0); SEEK_SET(0). Every other - llseek operation will report -EINVAL. - - If llseek on dma-buf FDs isn't support the kernel will report -ESPIPE for all - cases. Userspace can use this to detect support for discovering the dma-buf - size using llseek. - -Miscellaneous notes -------------------- - -- Any exporters or users of the dma-buf buffer sharing framework must have - a 'select DMA_SHARED_BUFFER' in their respective Kconfigs. - -- In order to avoid fd leaks on exec, the FD_CLOEXEC flag must be set - on the file descriptor. This is not just a resource leak, but a - potential security hole. It could give the newly exec'd application - access to buffers, via the leaked fd, to which it should otherwise - not be permitted access. - - The problem with doing this via a separate fcntl() call, versus doing it - atomically when the fd is created, is that this is inherently racy in a - multi-threaded app[3]. The issue is made worse when it is library code - opening/creating the file descriptor, as the application may not even be - aware of the fd's. - - To avoid this problem, userspace must have a way to request O_CLOEXEC - flag be set when the dma-buf fd is created. So any API provided by - the exporting driver to create a dmabuf fd must provide a way to let - userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd(). - -References: -[1] struct dma_buf_ops in include/linux/dma-buf.h -[2] All interfaces mentioned above defined in include/linux/dma-buf.h -[3] https://lwn.net/Articles/236486/ diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst index 92e417035e16..31671b469627 100644 --- a/Documentation/driver-api/dma-buf.rst +++ b/Documentation/driver-api/dma-buf.rst @@ -46,6 +46,48 @@ The buffer-user same area of memory. This interface is provided by :c:type:`struct dma_buf_attachment `. +Any exporters or users of the dma-buf buffer sharing framework must have a +'select DMA_SHARED_BUFFER' in their respective Kconfigs. + +Userspace Interface Notes +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Mostly a DMA buffer file descriptor is simply an opaque object for userspace, +and hence the generic interface exposed is very minimal. There's a few things to +consider though: + +- Since kernel 3.12 the dma-buf FD supports the llseek system call, but only + with offset=0 and whence=SEEK_END|SEEK_SET. 
SEEK_SET is supported to allow + the usual size discover pattern size = SEEK_END(0); SEEK_SET(0). Every other + llseek operation will report -EINVAL. + + If llseek on dma-buf FDs isn't support the kernel will report -ESPIPE for all + cases. Userspace can use this to detect support for discovering the dma-buf + size using llseek. + +- In order to avoid fd leaks on exec, the FD_CLOEXEC flag must be set + on the file descriptor. This is not just a resource leak, but a + potential security hole. It could give the newly exec'd application + access to buffers, via the leaked fd, to which it should otherwise + not be permitted access. + + The problem with doing this via a separate fcntl() call, versus doing it + atomically when the fd is created, is that this is inherently racy in a + multi-threaded app[3]. The issue is made worse when it is library code + opening/creating the file descriptor, as the application may not even be + aware of the fd's. + + To avoid this problem, userspace must have a way to request O_CLOEXEC + flag be set when the dma-buf fd is created. So any API provided by + the exporting driver to create a dmabuf fd must provide a way to let + userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd(). + +- Memory mapping the contents of the DMA buffer is also supported. See the + discussion below on `CPU Access to DMA Buffer Objects`_ for the full details. + +- The DMA buffer FD is also pollable, see `Fence Poll Support`_ below for + details. + Basic Operation and Device DMA Access ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -58,6 +100,12 @@ CPU Access to DMA Buffer Objects .. kernel-doc:: drivers/dma-buf/dma-buf.c :doc: cpu access +Fence Poll Support +~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: drivers/dma-buf/dma-buf.c + :doc: fence polling + Kernel Functions and Structures Reference ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/MAINTAINERS b/MAINTAINERS index f32012a01468..3da51b0437fb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3917,7 +3917,7 @@ F: drivers/dma-buf/ F: include/linux/dma-buf* F: include/linux/reservation.h F: include/linux/*fence.h -F: Documentation/dma-buf-sharing.txt +F: Documentation/driver-api/dma-buf.rst T: git git://anongit.freedesktop.org/drm/drm-misc SYNC FILE FRAMEWORK diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index eae0846cbd95..91aff74ed092 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -124,6 +124,28 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) return base + offset; } +/** + * DOC: fence polling + * + * To support cross-device and cross-driver synchronization of buffer access + * implicit fences (represented internally in the kernel with struct &fence) can + * be attached to a &dma_buf. The glue for that and a few related things are + * provided in the &reservation_object structure. + * + * Userspace can query the state of these implicitly tracked fences using poll() + * and related system calls: + * + * - Checking for POLLIN, i.e. read access, can be use to query the state of the + * most recent write or exclusive fence. + * + * - Checking for POLLOUT, i.e. write access, can be used to query the state of + * all attached fences, shared and exclusive ones. + * + * Note that this only signals the completion of the respective fences, i.e. the + * DMA transfers are complete. Cache flushing and any other necessary + * preparations before CPU access can begin still need to happen. 
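(A userspace sketch of the mmap, DMA_BUF_IOCTL_SYNC and poll() usage described above; error checking and the retry loop for -EINTR/-EAGAIN from the SYNC ioctl are omitted for brevity, and the function name is illustrative only.)

	#include <linux/dma-buf.h>	/* struct dma_buf_sync, DMA_BUF_IOCTL_SYNC */
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <poll.h>
	#include <string.h>

	static void fill_buffer(int dmabuf_fd, size_t size, const void *src)
	{
		struct dma_buf_sync sync = { 0 };
		struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLOUT };
		void *map;

		/* wait until all fences attached to the buffer have signalled */
		poll(&pfd, 1, -1);

		map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd, 0);

		sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
		ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

		memcpy(map, src, size);

		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
		ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

		munmap(map, size);
	}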
+ */ + static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; -- cgit v1.2.3 From 213e08ad60baca3676d6633ca91e523735eed474 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 5 Dec 2016 09:30:34 +0200 Subject: drm/i915/bxt: add bxt dsi gpio element support Request the GPIO by index through the consumer API. For now, use a quick hack to store the already requested ones, simply because I have no idea whether this actually works or not, and I have no way to test it. v2 by Mika: switch *NULL* to *"panel"* when requesting gpio for MIPI/DSI panel. Signed-off-by: Jani Nikula Signed-off-by: Mika Kahola Link: http://patchwork.freedesktop.org/patch/msgid/1480923034-21916-1-git-send-email-mika.kahola@intel.com --- drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 38 +++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index 47cd1b20fb3e..8f683b8b1816 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include