author     Ravi Bangoria <ravi.bangoria@amd.com>              2022-04-29 08:14:41 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2022-06-14 19:11:36 +0300
commit     ddb1a77f94d7b4a03a32a41358275969d393f5a8 (patch)
tree       d9f5149e61d94ef7c715762d3c312455fe55feec /arch
parent     f2e2e934d2b6fbd6847de8211e65485d52fc4ed1 (diff)
download   linux-ddb1a77f94d7b4a03a32a41358275969d393f5a8.tar.xz
perf/amd/ibs: Use interrupt regs ip for stack unwinding
[ Upstream commit 3d47083b9ff46863e8374ad3bb5edb5e464c75f8 ]

IbsOpRip is recorded when the IBS interrupt is triggered, but there is a
skid between the time the interrupt is triggered and the time it is
presented to the core. Meanwhile the processor has moved ahead, so
IbsOpRip is inconsistent with the rsp and rbp recorded as part of the
interrupt regs. This breaks stack unwinding with the ORC unwinder, which
needs a consistent rip, rsp and rbp. Fix this by using the rip from the
interrupt regs instead of IbsOpRip for stack unwinding.

Fixes: ee9f8fce99640 ("x86/unwind: Add the ORC unwinder")
Reported-by: Dmitry Monakhov <dmtrmonakhov@yandex-team.ru>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220429051441.14251-1-ravi.bangoria@amd.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
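For context (not part of the patch): the scenario the fix targets is an IBS op
event opened with callchain sampling. The sketch below is a minimal, hypothetical
userspace example of that configuration, assuming an AMD CPU with IBS where the
ibs_op PMU exports its dynamic type at /sys/bus/event_source/devices/ibs_op/type;
error handling and ring-buffer consumption are kept to a minimum.

/*
 * Hypothetical example, not part of the patch: open an IBS op event with
 * callchain sampling, the configuration whose unwinding this fix repairs.
 */
#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        unsigned int ibs_op_type;
        FILE *f;
        int fd;

        /* The IBS op PMU is a dynamic PMU; read its type from sysfs. */
        f = fopen("/sys/bus/event_source/devices/ibs_op/type", "r");
        if (!f) {
                perror("ibs_op type");
                return 1;
        }
        if (fscanf(f, "%u", &ibs_op_type) != 1) {
                fclose(f);
                fprintf(stderr, "cannot parse ibs_op PMU type\n");
                return 1;
        }
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size          = sizeof(attr);
        attr.type          = ibs_op_type;
        attr.sample_period = 100000;
        /* Callchain sampling is what exercises the unwinder fixed here. */
        attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_CALLCHAIN;

        fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        /* ... mmap a ring buffer and read PERF_RECORD_SAMPLEs here ... */
        close(fd);
        return 0;
}

From the perf tool, a similar event can be opened with something along the lines
of perf record -e ibs_op// -g.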
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/events/amd/ibs.c | 18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index b7baaa973317..2e930d8c04d9 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -312,6 +312,16 @@ static int perf_ibs_init(struct perf_event *event)
         hwc->config_base = perf_ibs->msr;
         hwc->config = config;
 
+        /*
+         * rip recorded by IbsOpRip will not be consistent with rsp and rbp
+         * recorded as part of interrupt regs. Thus we need to use rip from
+         * interrupt regs while unwinding call stack. Setting _EARLY flag
+         * makes sure we unwind call-stack before perf sample rip is set to
+         * IbsOpRip.
+         */
+        if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
+
         return 0;
 }
 
@@ -683,6 +693,14 @@ fail:
                 data.raw = &raw;
         }
 
+        /*
+         * rip recorded by IbsOpRip will not be consistent with rsp and rbp
+         * recorded as part of interrupt regs. Thus we need to use rip from
+         * interrupt regs while unwinding call stack.
+         */
+        if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                data.callchain = perf_callchain(event, iregs);
+
         throttle = perf_event_overflow(event, &data, &regs);
 out:
         if (throttle) {
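How the two hunks cooperate: the __PERF_SAMPLE_CALLCHAIN_EARLY flag set in
perf_ibs_init() tells the generic perf core that the driver has already resolved
the callchain, so the core's late unwind (which would run after the sample ip has
been rewritten to the skidded IbsOpRip) is skipped, and the handler's
perf_callchain(event, iregs) result is kept. The standalone program below is only
a model of that ordering: struct sample, unwind() and prepare_sample() are
invented stand-ins, not kernel APIs; only the two flag values mirror the kernel
headers of this era.

/*
 * Standalone model (plain userspace C, not kernel code) of the ordering the
 * patch relies on.  Types and helpers are invented stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define PERF_SAMPLE_CALLCHAIN           (1ULL << 5)     /* uapi sample flag */
#define __PERF_SAMPLE_CALLCHAIN_EARLY   (1ULL << 63)    /* kernel-internal */

struct regs { uint64_t ip, sp, bp; };

struct sample {
        uint64_t    sample_type;
        const char *callchain;          /* stand-in for perf_callchain_entry */
        struct regs regs;               /* regs handed to perf_event_overflow */
};

/* Stand-in unwinder: reports which register set the walk started from. */
static const char *unwind(const struct regs *r)
{
        return r->ip == 0x1000 ? "unwound from interrupt regs (consistent)"
                               : "unwound from IbsOpRip regs (skidded)";
}

/* Models the generic core: unwind late only if the driver did not do it. */
static void prepare_sample(struct sample *s)
{
        if ((s->sample_type & PERF_SAMPLE_CALLCHAIN) &&
            !(s->sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
                s->callchain = unwind(&s->regs);
}

int main(void)
{
        /* perf_ibs_init(): tag callchain events for early unwinding. */
        uint64_t sample_type = PERF_SAMPLE_CALLCHAIN;

        if (sample_type & PERF_SAMPLE_CALLCHAIN)
                sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;

        /* NMI handler: iregs are self-consistent; sample regs carry IbsOpRip. */
        struct regs iregs = { .ip = 0x1000, .sp = 0x2000, .bp = 0x3000 };
        struct sample s   = { .sample_type = sample_type, .regs = iregs };

        s.regs.ip = 0x1040;                     /* skid: ip moved ahead */

        if (s.sample_type & PERF_SAMPLE_CALLCHAIN)
                s.callchain = unwind(&iregs);   /* early unwind, from iregs */

        prepare_sample(&s);                     /* late unwind is skipped */

        printf("%s; sample ip %#llx\n", s.callchain,
               (unsigned long long)s.regs.ip);
        return 0;
}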