author    Anju T Sudhakar <anju@linux.vnet.ibm.com>    2018-05-18 10:35:25 +0300
committer Michael Ellerman <mpe@ellerman.id.au>        2018-08-07 17:32:32 +0300
commit    7ccc4fe5ff9e3a134e863beed0dba18a5e511659 (patch)
tree      bbe637790c756e51d0880eae216f7aff5ebe3008 /arch/powerpc/perf
parent    4231aba000f5a4583dd9f67057aadb68c3eca99d (diff)
download  linux-7ccc4fe5ff9e3a134e863beed0dba18a5e511659.tar.xz
powerpc/perf: Remove sched_task function defined for thread-imc

Call trace observed while running perf-fuzzer:

CPU: 43 PID: 9088 Comm: perf_fuzzer Not tainted 4.13.0-32-generic #35~lp1746225
task: c000003f776ac900 task.stack: c000003f77728000
NIP: c000000000299b70 LR: c0000000002a4534 CTR: c00000000029bb80
REGS: c000003f7772b760 TRAP: 0700 Not tainted (4.13.0-32-generic)
MSR: 900000000282b033 <SF,HV,VEC,VSX,EE,FP,ME,IR,DR,RI,LE> CR: 24008822 XER: 00000000
CFAR: c000000000299a70 SOFTE: 0
GPR00: c0000000002a4534 c000003f7772b9e0 c000000001606200 c000003fef858908
GPR04: c000003f776ac900 0000000000000001 ffffffffffffffff 0000003fee730000
GPR08: 0000000000000000 0000000000000000 c0000000011220d8 0000000000000002
GPR12: c00000000029bb80 c000000007a3d900 0000000000000000 0000000000000000
GPR16: 0000000000000000 0000000000000000 0000000000000000 0000000000000000
GPR20: 0000000000000000 0000000000000000 c000003f776ad090 c000000000c71354
GPR24: c000003fef716780 0000003fee730000 c000003fe69d4200 c000003f776ad330
GPR28: c0000000011220d8 0000000000000001 c0000000014c6108 c000003fef858900
NIP [c000000000299b70] perf_pmu_sched_task+0x170/0x180
LR [c0000000002a4534] __perf_event_task_sched_in+0xc4/0x230
Call Trace:
  perf_iterate_sb+0x158/0x2a0 (unreliable)
  __perf_event_task_sched_in+0xc4/0x230
  finish_task_switch+0x21c/0x310
  __schedule+0x304/0xb80
  schedule+0x40/0xc0
  do_wait+0x254/0x2e0
  kernel_wait4+0xa0/0x1a0
  SyS_wait4+0x64/0xc0
  system_call+0x58/0x6c
Instruction dump:
3beafea0 7faa4800 409eff18 e8010060 eb610028 ebc10040 7c0803a6 38210050
eb81ffe0 eba1ffe8 ebe1fff8 4e800020 <0fe00000> 4bffffbc 60000000 60420000
---[ end trace 8c46856d314c1811 ]---

The context-switch call-backs for thread-imc are defined in its sched_task
function. So when thread-imc events are grouped with software PMU events,
perf_pmu_sched_task hits the WARN_ON_ONCE condition, since software PMUs
are assumed not to define sched_task. Fix this by moving the thread-imc
enable/disable OPAL calls out of sched_task and into the event_add/event_del
functions.

Fixes: f74c89bd80fb ("powerpc/perf: Add thread IMC PMU support")
Signed-off-by: Anju T Sudhakar <anju@linux.vnet.ibm.com>
Reviewed-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Tested-by: Joel Stanley <joel@jms.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
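For reference, the warning comes from the generic perf core: perf_pmu_sched_task()
iterates the PMUs that registered a context-switch callback via perf_sched_cb_inc()
and warns when one of them has no sched_task implementation. The standalone model
below is not kernel source; sched_task_all() and the struct layouts are illustrative
only. It shows why grouping a thread-imc event under a software-PMU leader trips the
warning: thread-imc bumped the callback count on event->ctx->pmu, which for such a
group is the software PMU, and software PMUs leave sched_task NULL.

	#include <stdio.h>
	#include <stddef.h>

	struct pmu {
		const char *name;
		void (*sched_task)(int cpu, int sched_in); /* NULL for software PMUs */
	};

	static void thread_imc_sched_task(int cpu, int sched_in)
	{
		printf("thread-imc: %s counters on cpu %d\n",
		       sched_in ? "start" : "stop", cpu);
	}

	static struct pmu software_pmu = { "software",   NULL };
	static struct pmu thread_imc  = { "thread_imc", thread_imc_sched_task };

	/* Models perf_pmu_sched_task(): every PMU on the callback list is
	 * expected to implement sched_task; a NULL one trips WARN_ON_ONCE. */
	static void sched_task_all(struct pmu *cb_list[], size_t n, int cpu, int in)
	{
		for (size_t i = 0; i < n; i++) {
			if (!cb_list[i]->sched_task) {        /* WARN_ON_ONCE(...) */
				fprintf(stderr, "WARNING: pmu '%s' has no sched_task\n",
					cb_list[i]->name);
				continue;
			}
			cb_list[i]->sched_task(cpu, in);
		}
	}

	int main(void)
	{
		/* thread-imc called perf_sched_cb_inc(event->ctx->pmu); with a
		 * software-PMU group leader, that puts the software PMU
		 * (sched_task == NULL) on the callback list... */
		struct pmu *cb_list[] = { &software_pmu };

		sched_task_all(cb_list, 1, 43, 1); /* ...and the warning fires */
		(void)thread_imc; /* the thread-imc PMU itself is fine */
		return 0;
	}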
Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r--    arch/powerpc/perf/imc-pmu.c    108
1 file changed, 51 insertions(+), 57 deletions(-)
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index d1977b61f827..1fafc32b12a0 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -867,59 +867,6 @@ static int thread_imc_cpu_init(void)
 			  ppc_thread_imc_cpu_offline);
 }
-void thread_imc_pmu_sched_task(struct perf_event_context *ctx,
-			bool sched_in)
-{
-	int core_id;
-	struct imc_pmu_ref *ref;
-
-	if (!is_core_imc_mem_inited(smp_processor_id()))
-		return;
-
-	core_id = smp_processor_id() / threads_per_core;
-	/*
-	 * imc pmus are enabled only when it is used.
-	 * See if this is triggered for the first time.
-	 * If yes, take the mutex lock and enable the counters.
-	 * If not, just increment the count in ref count struct.
-	 */
-	ref = &core_imc_refc[core_id];
-	if (!ref)
-		return;
-
-	if (sched_in) {
-		mutex_lock(&ref->lock);
-		if (ref->refc == 0) {
-			if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
-			    get_hard_smp_processor_id(smp_processor_id()))) {
-				mutex_unlock(&ref->lock);
-				pr_err("thread-imc: Unable to start the counter\
-					for core %d\n", core_id);
-				return;
-			}
-		}
-		++ref->refc;
-		mutex_unlock(&ref->lock);
-	} else {
-		mutex_lock(&ref->lock);
-		ref->refc--;
-		if (ref->refc == 0) {
-			if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
-			    get_hard_smp_processor_id(smp_processor_id()))) {
-				mutex_unlock(&ref->lock);
-				pr_err("thread-imc: Unable to stop the counters\
-					for core %d\n", core_id);
-				return;
-			}
-		} else if (ref->refc < 0) {
-			ref->refc = 0;
-		}
-		mutex_unlock(&ref->lock);
-	}
-
-	return;
-}
-
 static int thread_imc_event_init(struct perf_event *event)
 {
 	u32 config = event->attr.config;
@@ -1046,22 +993,70 @@ static int imc_event_add(struct perf_event *event, int flags)
 
 static int thread_imc_event_add(struct perf_event *event, int flags)
 {
+	int core_id;
+	struct imc_pmu_ref *ref;
+
 	if (flags & PERF_EF_START)
 		imc_event_start(event, flags);
 
-	/* Enable the sched_task to start the engine */
-	perf_sched_cb_inc(event->ctx->pmu);
+	if (!is_core_imc_mem_inited(smp_processor_id()))
+		return -EINVAL;
+
+	core_id = smp_processor_id() / threads_per_core;
+	/*
+	 * imc pmus are enabled only when it is used.
+	 * See if this is triggered for the first time.
+	 * If yes, take the mutex lock and enable the counters.
+	 * If not, just increment the count in ref count struct.
+	 */
+	ref = &core_imc_refc[core_id];
+	if (!ref)
+		return -EINVAL;
+
+	mutex_lock(&ref->lock);
+	if (ref->refc == 0) {
+		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
+		    get_hard_smp_processor_id(smp_processor_id()))) {
+			mutex_unlock(&ref->lock);
+			pr_err("thread-imc: Unable to start the counter\
+				for core %d\n", core_id);
+			return -EINVAL;
+		}
+	}
+	++ref->refc;
+	mutex_unlock(&ref->lock);
 	return 0;
 }
 
 static void thread_imc_event_del(struct perf_event *event, int flags)
 {
+
+	int core_id;
+	struct imc_pmu_ref *ref;
+
 	/*
 	 * Take a snapshot and calculate the delta and update
 	 * the event counter values.
 	 */
 	imc_event_update(event);
-	perf_sched_cb_dec(event->ctx->pmu);
+
+	core_id = smp_processor_id() / threads_per_core;
+	ref = &core_imc_refc[core_id];
+
+	mutex_lock(&ref->lock);
+	ref->refc--;
+	if (ref->refc == 0) {
+		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+		    get_hard_smp_processor_id(smp_processor_id()))) {
+			mutex_unlock(&ref->lock);
+			pr_err("thread-imc: Unable to stop the counters\
+				for core %d\n", core_id);
+			return;
+		}
+	} else if (ref->refc < 0) {
+		ref->refc = 0;
+	}
+	mutex_unlock(&ref->lock);
 }
 
 /* update_pmu_ops : Populate the appropriate operations for "pmu" */
@@ -1087,7 +1082,6 @@ static int update_pmu_ops(struct imc_pmu *pmu)
 		break;
 	case IMC_DOMAIN_THREAD:
 		pmu->pmu.event_init = thread_imc_event_init;
-		pmu->pmu.sched_task = thread_imc_pmu_sched_task;
 		pmu->pmu.add = thread_imc_event_add;
 		pmu->pmu.del = thread_imc_event_del;
 		pmu->pmu.start_txn = thread_imc_pmu_start_txn;
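
Taken together, the change replaces the per-context-switch sched_task hook with a
per-core reference count owned by the add/del callbacks: the first add on a core
starts the counters, the last del stops them. The standalone sketch below distills
that pattern under assumed simplifications; counters_start()/counters_stop() stand
in for the opal_imc_counters_start/stop firmware calls, core_ref for struct
imc_pmu_ref, and the per-core mutex is omitted.

	#include <stdio.h>

	struct core_ref {
		int refc; /* events currently using this core's counters */
	};

	/* Stand-ins for the OPAL firmware calls; return 0 on success. */
	static int counters_start(int core_id)
	{
		printf("start counters on core %d\n", core_id);
		return 0;
	}

	static int counters_stop(int core_id)
	{
		printf("stop counters on core %d\n", core_id);
		return 0;
	}

	/* First event on the core starts the hardware; later events only
	 * bump the count.  Mirrors the new thread_imc_event_add(). */
	static int event_add(struct core_ref *ref, int core_id)
	{
		if (ref->refc == 0 && counters_start(core_id))
			return -1;
		++ref->refc;
		return 0;
	}

	/* Last event off the core stops the hardware; negative counts are
	 * clamped to zero, as in the new thread_imc_event_del(). */
	static void event_del(struct core_ref *ref, int core_id)
	{
		if (--ref->refc == 0)
			counters_stop(core_id);
		else if (ref->refc < 0)
			ref->refc = 0;
	}

	int main(void)
	{
		struct core_ref ref = { 0 };

		event_add(&ref, 5); /* refc 0 -> 1: counters start  */
		event_add(&ref, 5); /* refc 1 -> 2: no firmware call */
		event_del(&ref, 5); /* refc 2 -> 1: no firmware call */
		event_del(&ref, 5); /* refc 1 -> 0: counters stop   */
		return 0;
	}

In the kernel the first add and last del can race across events on the same core,
which is why the real code guards refc with ref->lock; the sketch drops that lock
for brevity.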