author      Sasha Levin <sashal@kernel.org>    2020-10-13 06:28:52 +0300
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2020-10-29 12:07:17 +0300
commit      7e5248ec07bc30cdbc498a33f950c69bf7d0c37b (patch)
tree        8cb7a7bf97647f8af0b32734394189b37684ef75 /arch
parent      2df4319976f9f6c3ead0f78d3905dadc86aa4c24 (diff)
download    linux-7e5248ec07bc30cdbc498a33f950c69bf7d0c37b.tar.xz
perf/x86: Fix n_pair for cancelled txn
[ Upstream commit 871a93b0aad65a7f44ee25f2d17932ef6d559850 ]

Kan reported that n_metric gets corrupted for cancelled transactions;
a similar issue exists for n_pair for AMD's Large Increment thing.

The problem was confirmed and confirmed fixed by Kim using:

  sudo perf stat -e "{cycles,cycles,cycles,cycles}:D" -a sleep 10 &

  # should succeed:
  sudo perf stat -e "{fp_ret_sse_avx_ops.all}:D" -a workload

  # should fail:
  sudo perf stat -e "{fp_ret_sse_avx_ops.all,fp_ret_sse_avx_ops.all,cycles}:D" -a workload

  # previously failed, now succeeds with this patch:
  sudo perf stat -e "{fp_ret_sse_avx_ops.all}:D" -a workload

Fixes: 5738891229a2 ("perf/x86/amd: Add support for Large Increment per Cycle Events")
Reported-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Kim Phillips <kim.phillips@amd.com>
Link: https://lkml.kernel.org/r/20201005082516.GG2628@hirez.programming.kicks-ass.net
Signed-off-by: Sasha Levin <sashal@kernel.org>
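To make the bookkeeping bug concrete, here is a minimal user-space C sketch of the counters involved (simplified stand-ins for the kernel code; n_added and the event arrays are omitted). Without the n_txn_pair roll-back this patch adds, a cancelled transaction leaves n_pair inflated, so later scheduling sees phantom counter pairs:

#include <assert.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct cpu_hw_events. */
struct cpu_hw_events {
        int n_events;   /* events currently collected */
        int n_txn;      /* events added by the open transaction */
        int n_pair;     /* events occupying a counter pair */
        int n_txn_pair; /* paired events added by the open transaction */
};

static void start_txn(struct cpu_hw_events *c)
{
        c->n_txn = 0;
        c->n_txn_pair = 0;              /* reset added by this patch */
}

static void collect_event(struct cpu_hw_events *c, int is_pair)
{
        c->n_events++;
        c->n_txn++;
        if (is_pair) {
                c->n_pair++;
                c->n_txn_pair++;        /* per-transaction pair count */
        }
}

static void cancel_txn(struct cpu_hw_events *c)
{
        c->n_events -= c->n_txn;
        c->n_pair -= c->n_txn_pair;     /* the previously missing roll-back */
}

int main(void)
{
        struct cpu_hw_events c = { 0, 0, 0, 0 };

        start_txn(&c);
        collect_event(&c, 1);   /* one Large Increment (paired) event */
        cancel_txn(&c);         /* group did not fit: undo everything */

        /* Before the fix, n_pair stayed at 1 here while n_events was 0,
         * which is exactly the corruption the commit message describes. */
        assert(c.n_events == 0 && c.n_pair == 0);
        printf("n_events=%d n_pair=%d\n", c.n_events, c.n_pair);
        return 0;
}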
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/events/core.c        6
-rw-r--r--   arch/x86/events/perf_event.h  1
2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 4103665c6e03..29640b4079af 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1087,8 +1087,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 
                 cpuc->event_list[n] = event;
                 n++;
-                if (is_counter_pair(&event->hw))
+                if (is_counter_pair(&event->hw)) {
                         cpuc->n_pair++;
+                        cpuc->n_txn_pair++;
+                }
         }
         return n;
 }
@@ -1953,6 +1955,7 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
 
         perf_pmu_disable(pmu);
         __this_cpu_write(cpu_hw_events.n_txn, 0);
+        __this_cpu_write(cpu_hw_events.n_txn_pair, 0);
 }
 
 /*
@@ -1978,6 +1981,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
          */
         __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
         __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
+        __this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair));
 
         perf_pmu_enable(pmu);
 }
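As a side note on the accessors used in these hunks (an illustrative sketch, not part of the patch): cpu_hw_events is a per-CPU variable, and the __this_cpu_*() macros operate on the current CPU's instance; unlike the this_cpu_*() variants, they assume the caller already runs with preemption disabled:

/* Illustrative pattern only; the variable matches core.c, but the
 * helper function is hypothetical. */
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static void roll_back_pairs(void)
{
        /* Subtract this transaction's pair count from the running total,
         * operating on the current CPU's copy of cpu_hw_events. */
        __this_cpu_sub(cpu_hw_events.n_pair,
                       __this_cpu_read(cpu_hw_events.n_txn_pair));
}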
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index e17a3d8a47ed..d4d482d16fe1 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -198,6 +198,7 @@ struct cpu_hw_events {
                                              they've never been enabled yet */
         int                     n_txn;    /* the # last events in the below arrays;
                                              added in the current transaction */
+        int                     n_txn_pair;
         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
         u64                     tags[X86_PMC_IDX_MAX];
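Taken together, the transaction-related counters in struct cpu_hw_events pair up as follows after this patch (a descriptive summary; the fields are real, but the grouping and comments are mine):

/* Annotated summary, not a literal excerpt from perf_event.h. */
struct txn_counters {                   /* hypothetical grouping */
        int n_events;   /* total collected events */
        int n_added;    /* events added since the PMU was last enabled */
        int n_txn;      /* of those, added by the open transaction;
                         * cancel_txn() subtracts this from n_events
                         * and n_added */
        int n_pair;     /* events using a paired (Large Increment) counter */
        int n_txn_pair; /* of those, added by the open transaction;
                         * cancel_txn() now subtracts this from n_pair */
};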