author		Dapeng Mi <dapeng1.mi@linux.intel.com>	2023-05-04 10:21:28 +0300
committer	Peter Zijlstra <peterz@infradead.org>	2023-05-08 11:58:32 +0300
commit		10d95a317ec12ec7dd4587a646c6bd6aa03c7ded (patch)
tree		d367fcd8b079ac14723dd09d0b6fce3cb4abfe0f /arch/x86/events
parent		78075d947534013b4575687d19ebcbbb6d3addcd (diff)
download	linux-10d95a317ec12ec7dd4587a646c6bd6aa03c7ded.tar.xz
perf/x86/intel: Define bit macros for FixCntrCtl MSR
Define bit macros for the FixCntrCtl MSR and replace the hardcoded bit values with these macros. This makes the code more human-readable.

The perf commands 'perf stat -e "instructions,cycles,ref-cycles"' and 'perf record -e "instructions,cycles,ref-cycles"' still pass.

Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20230504072128.3653470-1-dapeng1.mi@linux.intel.com
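The macro definitions themselves live outside the diffstat shown below (presumably in arch/x86/include/asm/perf_event.h, which this page does not include). Based on the hardcoded values being replaced (0x1, 0x2, 0x4, 0x8 and the 4-bit-per-counter stride), they likely look roughly like this sketch; the names are taken from the diff, but the exact header contents are assumed:

	/*
	 * Sketch of the assumed definitions (not shown in this diff, which is
	 * limited to arch/x86/events). Each fixed counter's control field in
	 * IA32_FIXED_CTR_CTRL is 4 bits wide, starting at bit idx * 4.
	 */
	#define INTEL_FIXED_BITS_MASK		0xFULL
	#define INTEL_FIXED_0_KERNEL		(1ULL << 0)	/* count in ring 0 */
	#define INTEL_FIXED_0_USER		(1ULL << 1)	/* count in rings 1-3 */
	#define INTEL_FIXED_0_ANYTHREAD		(1ULL << 2)	/* count on any HT thread (v3+) */
	#define INTEL_FIXED_0_ENABLE_PMI	(1ULL << 3)	/* raise PMI on overflow */

	#define intel_fixed_bits_by_idx(_idx, _bits)	\
		((_bits) << ((_idx) * 4))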
Diffstat (limited to 'arch/x86/events')
-rw-r--r--	arch/x86/events/intel/core.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 070cc4ef2672..0d09245aa8df 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2451,7 +2451,7 @@ static void intel_pmu_disable_fixed(struct perf_event *event)
intel_clear_masks(event, idx);
- mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4);
+ mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK);
cpuc->fixed_ctrl_val &= ~mask;
}
@@ -2750,25 +2750,25 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
* if requested:
*/
if (!event->attr.precise_ip)
- bits |= 0x8;
+ bits |= INTEL_FIXED_0_ENABLE_PMI;
if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
- bits |= 0x2;
+ bits |= INTEL_FIXED_0_USER;
if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
- bits |= 0x1;
+ bits |= INTEL_FIXED_0_KERNEL;
/*
* ANY bit is supported in v3 and up
*/
if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
- bits |= 0x4;
+ bits |= INTEL_FIXED_0_ANYTHREAD;
idx -= INTEL_PMC_IDX_FIXED;
- bits <<= (idx * 4);
- mask = 0xfULL << (idx * 4);
+ bits = intel_fixed_bits_by_idx(idx, bits);
+ mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
- bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
- mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
+ bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
+ mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
}
cpuc->fixed_ctrl_val &= ~mask;
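As a quick sanity check of the refactoring, a small standalone program (ordinary userspace C, not kernel code; the macro values are the assumptions sketched above) can confirm that the macro form in intel_pmu_enable_fixed() produces the same control value and mask as the old hardcoded shifts, e.g. for fixed counter 1 with the kernel, user and PMI bits set:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Assumed macro values, mirroring the sketch above. */
	#define INTEL_FIXED_BITS_MASK		0xFULL
	#define INTEL_FIXED_0_KERNEL		(1ULL << 0)
	#define INTEL_FIXED_0_USER		(1ULL << 1)
	#define INTEL_FIXED_0_ENABLE_PMI	(1ULL << 3)

	#define intel_fixed_bits_by_idx(idx, bits)	((uint64_t)(bits) << ((idx) * 4))

	int main(void)
	{
		/* Fixed counter 1, counting in user and kernel mode with PMI on overflow. */
		uint64_t bits = INTEL_FIXED_0_ENABLE_PMI | INTEL_FIXED_0_USER |
				INTEL_FIXED_0_KERNEL;
		int idx = 1;

		/* New macro form ... */
		uint64_t val  = intel_fixed_bits_by_idx(idx, bits);
		uint64_t mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);

		/* ... must match the old hardcoded shifts it replaces. */
		assert(val  == (bits << (idx * 4)));
		assert(mask == (0xFULL << (idx * 4)));

		printf("ctrl bits: %#llx, mask: %#llx\n",
		       (unsigned long long)val, (unsigned long long)mask);
		return 0;
	}

For idx 1 this prints ctrl bits 0xb0 and mask 0xf0, i.e. the same per-counter 4-bit field the hardcoded 0xfULL << (idx * 4) expression addressed before the change.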