-rw-r--r-- Documentation/sysctl/kernel.txt | 13
-rw-r--r-- MAINTAINERS | 1
-rw-r--r-- arch/x86/Kbuild | 3
-rw-r--r-- arch/x86/events/Makefile | 13
-rw-r--r-- arch/x86/events/amd/core.c (renamed from arch/x86/kernel/cpu/perf_event_amd.c) | 2
-rw-r--r-- arch/x86/events/amd/ibs.c (renamed from arch/x86/kernel/cpu/perf_event_amd_ibs.c) | 12
-rw-r--r-- arch/x86/events/amd/iommu.c (renamed from arch/x86/kernel/cpu/perf_event_amd_iommu.c) | 4
-rw-r--r-- arch/x86/events/amd/iommu.h (renamed from arch/x86/kernel/cpu/perf_event_amd_iommu.h) | 0
-rw-r--r-- arch/x86/events/amd/uncore.c (renamed from arch/x86/kernel/cpu/perf_event_amd_uncore.c) | 4
-rw-r--r-- arch/x86/events/core.c (renamed from arch/x86/kernel/cpu/perf_event.c) | 22
-rw-r--r-- arch/x86/events/intel/bts.c (renamed from arch/x86/kernel/cpu/perf_event_intel_bts.c) | 2
-rw-r--r-- arch/x86/events/intel/core.c (renamed from arch/x86/kernel/cpu/perf_event_intel.c) | 31
-rw-r--r-- arch/x86/events/intel/cqm.c (renamed from arch/x86/kernel/cpu/perf_event_intel_cqm.c) | 34
-rw-r--r-- arch/x86/events/intel/cstate.c (renamed from arch/x86/kernel/cpu/perf_event_intel_cstate.c) | 2
-rw-r--r-- arch/x86/events/intel/ds.c (renamed from arch/x86/kernel/cpu/perf_event_intel_ds.c) | 56
-rw-r--r-- arch/x86/events/intel/knc.c (renamed from arch/x86/kernel/cpu/perf_event_knc.c) | 6
-rw-r--r-- arch/x86/events/intel/lbr.c (renamed from arch/x86/kernel/cpu/perf_event_intel_lbr.c) | 2
-rw-r--r-- arch/x86/events/intel/p4.c (renamed from arch/x86/kernel/cpu/perf_event_p4.c) | 2
-rw-r--r-- arch/x86/events/intel/p6.c (renamed from arch/x86/kernel/cpu/perf_event_p6.c) | 2
-rw-r--r-- arch/x86/events/intel/pt.c (renamed from arch/x86/kernel/cpu/perf_event_intel_pt.c) | 4
-rw-r--r-- arch/x86/events/intel/pt.h (renamed from arch/x86/kernel/cpu/intel_pt.h) | 0
-rw-r--r-- arch/x86/events/intel/rapl.c (renamed from arch/x86/kernel/cpu/perf_event_intel_rapl.c) | 412
-rw-r--r-- arch/x86/events/intel/uncore.c (renamed from arch/x86/kernel/cpu/perf_event_intel_uncore.c) | 677
-rw-r--r-- arch/x86/events/intel/uncore.h (renamed from arch/x86/kernel/cpu/perf_event_intel_uncore.h) | 55
-rw-r--r-- arch/x86/events/intel/uncore_nhmex.c (renamed from arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c) | 8
-rw-r--r-- arch/x86/events/intel/uncore_snb.c (renamed from arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c) | 16
-rw-r--r-- arch/x86/events/intel/uncore_snbep.c (renamed from arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c) | 21
-rw-r--r-- arch/x86/events/msr.c (renamed from arch/x86/kernel/cpu/perf_event_msr.c) | 0
-rw-r--r-- arch/x86/events/perf_event.h (renamed from arch/x86/kernel/cpu/perf_event.h) | 5
-rw-r--r-- arch/x86/include/asm/elf.h | 2
-rw-r--r-- arch/x86/include/asm/perf_event.h | 1
-rw-r--r-- arch/x86/include/asm/processor.h | 2
-rw-r--r-- arch/x86/include/asm/topology.h | 11
-rw-r--r-- arch/x86/kernel/apic/apic.c | 14
-rw-r--r-- arch/x86/kernel/cpu/Makefile | 24
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 23
-rw-r--r-- arch/x86/kernel/cpu/bugs_64.c | 2
-rw-r--r-- arch/x86/kernel/cpu/centaur.c | 10
-rw-r--r-- arch/x86/kernel/cpu/common.c | 44
-rw-r--r-- arch/x86/kernel/cpu/cyrix.c | 10
-rw-r--r-- arch/x86/kernel/cpu/hypervisor.c | 2
-rw-r--r-- arch/x86/kernel/cpu/intel.c | 23
-rw-r--r-- arch/x86/kernel/cpu/intel_cacheinfo.c | 2
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce-inject.c | 15
-rw-r--r-- arch/x86/kernel/cpu/mcheck/p5.c | 18
-rw-r--r-- arch/x86/kernel/cpu/mcheck/therm_throt.c | 15
-rw-r--r-- arch/x86/kernel/cpu/mcheck/threshold.c | 4
-rw-r--r-- arch/x86/kernel/cpu/mcheck/winchip.c | 5
-rw-r--r-- arch/x86/kernel/cpu/microcode/amd.c | 2
-rw-r--r-- arch/x86/kernel/cpu/mshyperv.c | 8
-rw-r--r-- arch/x86/kernel/cpu/mtrr/centaur.c | 2
-rw-r--r-- arch/x86/kernel/cpu/mtrr/cleanup.c | 44
-rw-r--r-- arch/x86/kernel/cpu/mtrr/generic.c | 23
-rw-r--r-- arch/x86/kernel/cpu/mtrr/main.c | 20
-rw-r--r-- arch/x86/kernel/cpu/rdrand.c | 2
-rw-r--r-- arch/x86/kernel/cpu/topology.c | 4
-rw-r--r-- arch/x86/kernel/cpu/transmeta.c | 8
-rw-r--r-- arch/x86/kernel/cpu/vmware.c | 5
-rw-r--r-- arch/x86/kernel/mpparse.c | 2
-rw-r--r-- arch/x86/kernel/nmi.c | 3
-rw-r--r-- arch/x86/kernel/smpboot.c | 100
-rw-r--r-- arch/x86/lguest/boot.c | 2
-rw-r--r-- arch/x86/xen/enlighten.c | 2
-rw-r--r-- arch/x86/xen/pmu.c | 2
-rw-r--r-- include/linux/perf_event.h | 1
-rw-r--r-- kernel/events/core.c | 29
-rw-r--r-- kernel/trace/trace_kprobe.c | 19
-rw-r--r-- kernel/trace/trace_syscalls.c | 16
-rw-r--r-- lib/cpumask.c | 1
-rw-r--r-- tools/build/Makefile.build | 2
-rw-r--r-- tools/build/Makefile.feature | 31
-rw-r--r-- tools/build/feature/Makefile | 4
-rw-r--r-- tools/build/feature/test-all.c | 5
-rw-r--r-- tools/build/feature/test-compile.c | 2
-rw-r--r-- tools/build/feature/test-libcrypto.c | 17
-rw-r--r-- tools/lib/api/Build | 1
-rw-r--r-- tools/lib/api/Makefile | 1
-rw-r--r-- tools/lib/api/debug-internal.h | 20
-rw-r--r-- tools/lib/api/debug.c | 28
-rw-r--r-- tools/lib/api/debug.h | 10
-rw-r--r-- tools/lib/api/fs/fs.c | 64
-rw-r--r-- tools/lib/api/fs/fs.h | 3
-rw-r--r-- tools/lib/bpf/libbpf.c | 34
-rw-r--r-- tools/lib/traceevent/event-parse.c | 156
-rw-r--r-- tools/lib/traceevent/event-parse.h | 13
-rw-r--r-- tools/perf/Documentation/perf-config.txt | 357
-rw-r--r-- tools/perf/Documentation/perf-inject.txt | 7
-rw-r--r-- tools/perf/Documentation/perf-record.txt | 6
-rw-r--r-- tools/perf/Documentation/perf-report.txt | 40
-rw-r--r-- tools/perf/Documentation/perf-stat.txt | 35
-rw-r--r-- tools/perf/Documentation/perf-top.txt | 3
-rw-r--r-- tools/perf/Documentation/perfconfig.example | 2
-rw-r--r-- tools/perf/Documentation/tips.txt | 1
-rw-r--r-- tools/perf/Makefile | 25
-rw-r--r-- tools/perf/Makefile.perf | 16
-rw-r--r-- tools/perf/arch/arm/Makefile | 1
-rw-r--r-- tools/perf/arch/arm64/Makefile | 1
-rw-r--r-- tools/perf/arch/powerpc/Makefile | 3
-rw-r--r-- tools/perf/arch/powerpc/util/Build | 1
-rw-r--r-- tools/perf/arch/powerpc/util/book3s_hcalls.h | 123
-rw-r--r-- tools/perf/arch/powerpc/util/book3s_hv_exits.h | 33
-rw-r--r-- tools/perf/arch/powerpc/util/kvm-stat.c | 170
-rw-r--r-- tools/perf/arch/s390/util/kvm-stat.c | 10
-rw-r--r-- tools/perf/arch/x86/Makefile | 1
-rw-r--r-- tools/perf/arch/x86/tests/rdpmc.c | 3
-rw-r--r-- tools/perf/arch/x86/util/intel-bts.c | 4
-rw-r--r-- tools/perf/arch/x86/util/intel-pt.c | 6
-rw-r--r-- tools/perf/arch/x86/util/kvm-stat.c | 16
-rw-r--r-- tools/perf/bench/mem-memcpy-x86-64-asm.S | 5
-rw-r--r-- tools/perf/builtin-annotate.c | 2
-rw-r--r-- tools/perf/builtin-buildid-cache.c | 14
-rw-r--r-- tools/perf/builtin-config.c | 27
-rw-r--r-- tools/perf/builtin-diff.c | 2
-rw-r--r-- tools/perf/builtin-help.c | 5
-rw-r--r-- tools/perf/builtin-inject.c | 105
-rw-r--r-- tools/perf/builtin-kmem.c | 4
-rw-r--r-- tools/perf/builtin-kvm.c | 38
-rw-r--r-- tools/perf/builtin-mem.c | 84
-rw-r--r-- tools/perf/builtin-record.c | 197
-rw-r--r-- tools/perf/builtin-report.c | 58
-rw-r--r-- tools/perf/builtin-script.c | 155
-rw-r--r-- tools/perf/builtin-stat.c | 567
-rw-r--r-- tools/perf/builtin-top.c | 43
-rw-r--r-- tools/perf/builtin-trace.c | 54
-rw-r--r-- tools/perf/config/Makefile | 119
-rw-r--r-- tools/perf/jvmti/Makefile | 89
-rw-r--r-- tools/perf/jvmti/jvmti_agent.c | 465
-rw-r--r-- tools/perf/jvmti/jvmti_agent.h | 36
-rw-r--r-- tools/perf/jvmti/libjvmti.c | 304
-rw-r--r-- tools/perf/perf.c | 18
-rw-r--r-- tools/perf/perf.h | 2
-rw-r--r-- tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 5
-rw-r--r-- tools/perf/tests/.gitignore | 1
-rw-r--r-- tools/perf/tests/Build | 9
-rw-r--r-- tools/perf/tests/bp_signal.c | 140
-rw-r--r-- tools/perf/tests/bpf-script-test-relocation.c | 50
-rw-r--r-- tools/perf/tests/bpf.c | 65
-rw-r--r-- tools/perf/tests/code-reading.c | 10
-rw-r--r-- tools/perf/tests/hists_cumulate.c | 2
-rw-r--r-- tools/perf/tests/hists_filter.c | 2
-rw-r--r-- tools/perf/tests/hists_output.c | 10
-rw-r--r-- tools/perf/tests/llvm.c | 25
-rw-r--r-- tools/perf/tests/llvm.h | 5
-rw-r--r-- tools/perf/tests/make | 50
-rw-r--r-- tools/perf/tests/parse-events.c | 54
-rw-r--r-- tools/perf/tests/vmlinux-kallsyms.c | 24
-rw-r--r-- tools/perf/ui/browser.c | 4
-rw-r--r-- tools/perf/ui/browser.h | 2
-rw-r--r-- tools/perf/ui/browsers/annotate.c | 2
-rw-r--r-- tools/perf/ui/browsers/hists.c | 885
-rw-r--r-- tools/perf/ui/gtk/hists.c | 199
-rw-r--r-- tools/perf/ui/hist.c | 262
-rw-r--r-- tools/perf/ui/stdio/hist.c | 302
-rw-r--r-- tools/perf/util/Build | 10
-rw-r--r-- tools/perf/util/auxtrace.c | 7
-rw-r--r-- tools/perf/util/auxtrace.h | 6
-rw-r--r-- tools/perf/util/bpf-loader.c | 724
-rw-r--r-- tools/perf/util/bpf-loader.h | 59
-rw-r--r-- tools/perf/util/build-id.c | 50
-rw-r--r-- tools/perf/util/build-id.h | 1
-rw-r--r-- tools/perf/util/cache.h | 3
-rw-r--r-- tools/perf/util/callchain.c | 102
-rw-r--r-- tools/perf/util/color.c | 5
-rw-r--r-- tools/perf/util/config.c | 4
-rw-r--r-- tools/perf/util/cpumap.c | 30
-rw-r--r-- tools/perf/util/cpumap.h | 32
-rw-r--r-- tools/perf/util/ctype.c | 9
-rw-r--r-- tools/perf/util/data-convert-bt.c | 138
-rw-r--r-- tools/perf/util/debug.c | 111
-rw-r--r-- tools/perf/util/debug.h | 1
-rw-r--r-- tools/perf/util/demangle-java.c | 199
-rw-r--r-- tools/perf/util/demangle-java.h | 10
-rw-r--r-- tools/perf/util/dso.c | 5
-rw-r--r-- tools/perf/util/env.c | 13
-rw-r--r-- tools/perf/util/env.h | 15
-rw-r--r-- tools/perf/util/event.c | 2
-rw-r--r-- tools/perf/util/evlist.c | 43
-rw-r--r-- tools/perf/util/evlist.h | 3
-rw-r--r-- tools/perf/util/evsel.c | 30
-rw-r--r-- tools/perf/util/evsel.h | 14
-rw-r--r-- tools/perf/util/genelf.c | 449
-rw-r--r-- tools/perf/util/genelf.h | 67
-rw-r--r-- tools/perf/util/genelf_debug.c | 610
-rw-r--r-- tools/perf/util/header.c | 270
-rw-r--r-- tools/perf/util/header.h | 1
-rw-r--r-- tools/perf/util/help-unknown-cmd.c | 5
-rw-r--r-- tools/perf/util/hist.c | 841
-rw-r--r-- tools/perf/util/hist.h | 120
-rw-r--r-- tools/perf/util/jit.h | 15
-rw-r--r-- tools/perf/util/jitdump.c | 697
-rw-r--r-- tools/perf/util/jitdump.h | 124
-rw-r--r-- tools/perf/util/kvm-stat.h | 8
-rw-r--r-- tools/perf/util/machine.h | 10
-rw-r--r-- tools/perf/util/mem-events.c | 255
-rw-r--r-- tools/perf/util/mem-events.h | 35
-rw-r--r-- tools/perf/util/parse-events.c | 314
-rw-r--r-- tools/perf/util/parse-events.h | 28
-rw-r--r-- tools/perf/util/parse-events.l | 19
-rw-r--r-- tools/perf/util/parse-events.y | 184
-rw-r--r-- tools/perf/util/pmu.c | 34
-rw-r--r-- tools/perf/util/scripting-engines/trace-event-perl.c | 3
-rw-r--r-- tools/perf/util/scripting-engines/trace-event-python.c | 7
-rw-r--r-- tools/perf/util/session.c | 40
-rw-r--r-- tools/perf/util/setup.py | 4
-rw-r--r-- tools/perf/util/sort.c | 761
-rw-r--r-- tools/perf/util/sort.h | 29
-rw-r--r-- tools/perf/util/stat-shadow.c | 225
-rw-r--r-- tools/perf/util/stat.c | 14
-rw-r--r-- tools/perf/util/stat.h | 24
-rw-r--r-- tools/perf/util/strbuf.c | 24
-rw-r--r-- tools/perf/util/strbuf.h | 2
-rw-r--r-- tools/perf/util/symbol-elf.c | 3
-rw-r--r-- tools/perf/util/symbol.c | 10
-rw-r--r-- tools/perf/util/symbol.h | 3
-rw-r--r-- tools/perf/util/trace-event.c | 1
-rw-r--r-- tools/perf/util/tsc.c | 2
-rw-r--r-- tools/perf/util/util.c | 112
-rw-r--r-- tools/perf/util/util.h | 25
-rw-r--r-- tools/power/x86/turbostat/turbostat.c | 8
219 files changed, 12250 insertions, 2446 deletions
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index a93b414672a7..f886fbb1ad05 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -58,6 +58,8 @@ show up in /proc/sys/kernel:
- panic_on_stackoverflow
- panic_on_unrecovered_nmi
- panic_on_warn
+- perf_cpu_time_max_percent
+- perf_event_paranoid
- pid_max
- powersave-nap [ PPC only ]
- printk
@@ -639,6 +641,17 @@ allowed to execute.
==============================================================
+perf_event_paranoid:
+
+Controls use of the performance events system by unprivileged
+users (without CAP_SYS_ADMIN). The default value is 1.
+
+ -1: Allow use of (almost) all events by all users
+>=0: Disallow raw tracepoint access by users without CAP_SYS_ADMIN
+>=1: Disallow CPU event access by users without CAP_SYS_ADMIN
+>=2: Disallow kernel profiling by users without CAP_SYS_ADMIN
+
+==============================================================
pid_max:
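
The three thresholds above are cumulative: each higher value keeps the restrictions of the lower ones. As a rough user-space illustration (not part of this patch), a tool might probe the sysctl before requesting kernel profiling:

#include <stdio.h>

static int read_paranoid_level(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int level = 2;			/* assume the restrictive default on error */

	if (f) {
		if (fscanf(f, "%d", &level) != 1)
			level = 2;
		fclose(f);
	}
	return level;
}

int main(void)
{
	int level = read_paranoid_level();

	printf("perf_event_paranoid = %d\n", level);
	if (level >= 2)
		printf("kernel profiling requires CAP_SYS_ADMIN\n");
	return 0;
}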
diff --git a/MAINTAINERS b/MAINTAINERS
index 6ee06ea47be4..2061ea77667c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8475,6 +8475,7 @@ PERFORMANCE EVENTS SUBSYSTEM
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@kernel.org>
+R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Supported
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 1538562cc720..eb3abf8ac44e 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -1,6 +1,7 @@
-
obj-y += entry/
+obj-$(CONFIG_PERF_EVENTS) += events/
+
obj-$(CONFIG_KVM) += kvm/
# Xen paravirtualization support
diff --git a/arch/x86/events/Makefile b/arch/x86/events/Makefile
new file mode 100644
index 000000000000..fdfea1511cc0
--- /dev/null
+++ b/arch/x86/events/Makefile
@@ -0,0 +1,13 @@
+obj-y += core.o
+
+obj-$(CONFIG_CPU_SUP_AMD) += amd/core.o amd/uncore.o
+obj-$(CONFIG_X86_LOCAL_APIC) += amd/ibs.o msr.o
+ifdef CONFIG_AMD_IOMMU
+obj-$(CONFIG_CPU_SUP_AMD) += amd/iommu.o
+endif
+obj-$(CONFIG_CPU_SUP_INTEL) += intel/core.o intel/bts.o intel/cqm.o
+obj-$(CONFIG_CPU_SUP_INTEL) += intel/cstate.o intel/ds.o intel/knc.o
+obj-$(CONFIG_CPU_SUP_INTEL) += intel/lbr.o intel/p4.o intel/p6.o intel/pt.o
+obj-$(CONFIG_CPU_SUP_INTEL) += intel/rapl.o msr.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel/uncore.o intel/uncore_nhmex.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel/uncore_snb.o intel/uncore_snbep.o
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/events/amd/core.c
index 58610539b048..049ada8d4e9c 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/events/amd/core.c
@@ -5,7 +5,7 @@
#include <linux/slab.h>
#include <asm/apicdef.h>
-#include "perf_event.h"
+#include "../perf_event.h"
static __initconst const u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/events/amd/ibs.c
index 989d3c215d2b..51087c29b2c2 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -14,7 +14,7 @@
#include <asm/apic.h>
-#include "perf_event.h"
+#include "../perf_event.h"
static u32 ibs_caps;
@@ -670,7 +670,7 @@ static __init int perf_event_ibs_init(void)
perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
- printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);
+ pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
return 0;
}
@@ -774,14 +774,14 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
pci_read_config_dword(cpu_cfg, IBSCTL, &value);
if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
pci_dev_put(cpu_cfg);
- printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
- "IBSCTL = 0x%08x\n", value);
+ pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
+ value);
return -EINVAL;
}
} while (1);
if (!nodes) {
- printk(KERN_DEBUG "No CPU node configured for IBS\n");
+ pr_debug("No CPU node configured for IBS\n");
return -ENODEV;
}
@@ -810,7 +810,7 @@ static void force_ibs_eilvt_setup(void)
preempt_enable();
if (offset == APIC_EILVT_NR_MAX) {
- printk(KERN_DEBUG "No EILVT entry available\n");
+ pr_debug("No EILVT entry available\n");
return;
}
diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/events/amd/iommu.c
index 97242a9242bd..635e5eba0caf 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -16,8 +16,8 @@
#include <linux/cpumask.h>
#include <linux/slab.h>
-#include "perf_event.h"
-#include "perf_event_amd_iommu.h"
+#include "../perf_event.h"
+#include "iommu.h"
#define COUNTER_SHIFT 16
diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.h b/arch/x86/events/amd/iommu.h
index 845d173278e3..845d173278e3 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.h
+++ b/arch/x86/events/amd/iommu.h
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/events/amd/uncore.c
index 8836fc9fa84b..3db9569e658c 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -538,7 +538,7 @@ static int __init amd_uncore_init(void)
if (ret)
goto fail_nb;
- printk(KERN_INFO "perf: AMD NB counters detected\n");
+ pr_info("perf: AMD NB counters detected\n");
ret = 0;
}
@@ -552,7 +552,7 @@ static int __init amd_uncore_init(void)
if (ret)
goto fail_l2;
- printk(KERN_INFO "perf: AMD L2I counters detected\n");
+ pr_info("perf: AMD L2I counters detected\n");
ret = 0;
}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/events/core.c
index 1b443db2db50..5e830d0c95c9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/events/core.c
@@ -254,15 +254,16 @@ static bool check_hw_exists(void)
* We still allow the PMU driver to operate:
*/
if (bios_fail) {
- printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
- printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg_fail, val_fail);
+ pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
+ pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
+ reg_fail, val_fail);
}
return true;
msr_fail:
- printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
- printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+ pr_cont("Broken PMU hardware detected, using software events only.\n");
+ pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
reg, val_new);
@@ -596,6 +597,19 @@ void x86_pmu_disable_all(void)
}
}
+/*
+ * There may be PMI landing after enabled=0. The PMI hitting could be before or
+ * after disable_all.
+ *
+ * If PMI hits before disable_all, the PMU will be disabled in the NMI handler.
+ * It will not be re-enabled in the NMI handler again, because enabled=0. After
+ * handling the NMI, disable_all will be called, which will not change the
+ * state either. If PMI hits after disable_all, the PMU is already disabled
+ * before entering NMI handler. The NMI handler will not change the state
+ * either.
+ *
+ * So either situation is harmless.
+ */
static void x86_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
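
The comment added above is the invariant the interrupt handlers below rely on. Condensed into a sketch with hypothetical helper names (process_overflows() and pmu_enable_all() are stand-ins, not functions from this patch):

/*
 * Hypothetical sketch of the guard pattern; compare the knc and intel
 * handle_irq changes further down in this diff.
 */
static int pmu_handle_nmi(struct cpu_hw_events *cpuc, struct pt_regs *regs)
{
	int handled = process_overflows(cpuc, regs);

	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
	if (cpuc->enabled)
		pmu_enable_all();

	return handled;
}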
diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/events/intel/bts.c
index 2cad71d1b14c..b99dc9258c0f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -26,7 +26,7 @@
#include <asm-generic/sizes.h>
#include <asm/perf_event.h>
-#include "perf_event.h"
+#include "../perf_event.h"
struct bts_ctx {
struct perf_output_handle handle;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/events/intel/core.c
index fed2ab1f1065..68fa55b4d42e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/events/intel/core.c
@@ -18,7 +18,7 @@
#include <asm/hardirq.h>
#include <asm/apic.h>
-#include "perf_event.h"
+#include "../perf_event.h"
/*
* Intel PerfMon, used on Core and later.
@@ -1502,7 +1502,15 @@ static __initconst const u64 knl_hw_cache_extra_regs
};
/*
- * Use from PMIs where the LBRs are already disabled.
+ * Used from PMIs where the LBRs are already disabled.
+ *
+ * This function could be called consecutively. It is required to remain in
+ * disabled state if called consecutively.
+ *
+ * During consecutive calls, the same disable value will be written to related
+ * registers, so the PMU state remains unchanged. hw.state in
+ * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
+ * calls.
*/
static void __intel_pmu_disable_all(void)
{
@@ -1884,6 +1892,16 @@ again:
if (__test_and_clear_bit(62, (unsigned long *)&status)) {
handled++;
x86_pmu.drain_pebs(regs);
+ /*
+ * There are cases where, even though, the PEBS ovfl bit is set
+ * in GLOBAL_OVF_STATUS, the PEBS events may also have their
+ * overflow bits set for their counters. We must clear them
+ * here because they have been processed as exact samples in
+ * the drain_pebs() routine. They must not be processed again
+ * in the for_each_bit_set() loop for regular samples below.
+ */
+ status &= ~cpuc->pebs_enabled;
+ status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
}
/*
@@ -1929,7 +1947,10 @@ again:
goto again;
done:
- __intel_pmu_enable_all(0, true);
+ /* Only restore PMU state when it's active. See x86_pmu_disable(). */
+ if (cpuc->enabled)
+ __intel_pmu_enable_all(0, true);
+
/*
* Only unmask the NMI after the overflow counters
* have been reset. This avoids spurious NMIs on
@@ -3396,6 +3417,7 @@ __init int intel_pmu_init(void)
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
+ intel_pmu_pebs_data_source_nhm();
x86_add_quirk(intel_nehalem_quirk);
pr_cont("Nehalem events, ");
@@ -3459,6 +3481,7 @@ __init int intel_pmu_init(void)
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
+ intel_pmu_pebs_data_source_nhm();
pr_cont("Westmere events, ");
break;
@@ -3581,7 +3604,7 @@ __init int intel_pmu_init(void)
intel_pmu_lbr_init_hsw();
x86_pmu.event_constraints = intel_bdw_event_constraints;
- x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
+ x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
x86_pmu.extra_regs = intel_snbep_extra_regs;
x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
x86_pmu.pebs_prec_dist = true;
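
The status masking added in the handle_irq hunk above is plain bit arithmetic; a tiny standalone illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up values: counters 0-2 overflowed, counter 0 is a PEBS event */
	uint64_t status = 0x7, pebs_enabled = 0x1;

	/* As in the hunk above: drop bits already consumed by drain_pebs() */
	status &= ~pebs_enabled;

	printf("remaining status: 0x%llx\n",	/* prints 0x6 */
	       (unsigned long long)status);
	return 0;
}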
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/events/intel/cqm.c
index a316ca96f1b6..93cb412a5579 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -7,7 +7,7 @@
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
-#include "perf_event.h"
+#include "../perf_event.h"
#define MSR_IA32_PQR_ASSOC 0x0c8f
#define MSR_IA32_QM_CTR 0x0c8e
@@ -1244,15 +1244,12 @@ static struct pmu intel_cqm_pmu = {
static inline void cqm_pick_event_reader(int cpu)
{
- int phys_id = topology_physical_package_id(cpu);
- int i;
+ int reader;
- for_each_cpu(i, &cqm_cpumask) {
- if (phys_id == topology_physical_package_id(i))
- return; /* already got reader for this socket */
- }
-
- cpumask_set_cpu(cpu, &cqm_cpumask);
+ /* First online cpu in package becomes the reader */
+ reader = cpumask_any_and(&cqm_cpumask, topology_core_cpumask(cpu));
+ if (reader >= nr_cpu_ids)
+ cpumask_set_cpu(cpu, &cqm_cpumask);
}
static void intel_cqm_cpu_starting(unsigned int cpu)
@@ -1270,24 +1267,17 @@ static void intel_cqm_cpu_starting(unsigned int cpu)
static void intel_cqm_cpu_exit(unsigned int cpu)
{
- int phys_id = topology_physical_package_id(cpu);
- int i;
+ int target;
- /*
- * Is @cpu a designated cqm reader?
- */
+ /* Is @cpu the current cqm reader for this package? */
if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask))
return;
- for_each_online_cpu(i) {
- if (i == cpu)
- continue;
+ /* Find another online reader in this package */
+ target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
- if (phys_id == topology_physical_package_id(i)) {
- cpumask_set_cpu(i, &cqm_cpumask);
- break;
- }
- }
+ if (target < nr_cpu_ids)
+ cpumask_set_cpu(target, &cqm_cpumask);
}
static int intel_cqm_cpu_notifier(struct notifier_block *nb,
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cstate.c b/arch/x86/events/intel/cstate.c
index 75a38b5a2e26..7946c4231169 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -89,7 +89,7 @@
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
-#include "perf_event.h"
+#include "../perf_event.h"
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \
static ssize_t __cstate_##_var##_show(struct kobject *kobj, \
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/events/intel/ds.c
index 10602f0a438f..ce7211a07c0b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -5,7 +5,7 @@
#include <asm/perf_event.h>
#include <asm/insn.h>
-#include "perf_event.h"
+#include "../perf_event.h"
/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE 24
@@ -51,7 +51,8 @@ union intel_x86_pebs_dse {
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
-static const u64 pebs_data_source[] = {
+/* Version for Sandy Bridge and later */
+static u64 pebs_data_source[] = {
P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
OP_LH | P(LVL, L1) | P(SNOOP, NONE), /* 0x01: L1 local */
OP_LH | P(LVL, LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
@@ -70,6 +71,14 @@ static const u64 pebs_data_source[] = {
OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
};
+/* Patch up minor differences in the bits */
+void __init intel_pmu_pebs_data_source_nhm(void)
+{
+ pebs_data_source[0x05] = OP_LH | P(LVL, L3) | P(SNOOP, HIT);
+ pebs_data_source[0x06] = OP_LH | P(LVL, L3) | P(SNOOP, HITM);
+ pebs_data_source[0x07] = OP_LH | P(LVL, L3) | P(SNOOP, HITM);
+}
+
static u64 precise_store_data(u64 status)
{
union intel_x86_pebs_dse dse;
@@ -269,7 +278,7 @@ static int alloc_pebs_buffer(int cpu)
if (!x86_pmu.pebs)
return 0;
- buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
+ buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
if (unlikely(!buffer))
return -ENOMEM;
@@ -286,7 +295,7 @@ static int alloc_pebs_buffer(int cpu)
per_cpu(insn_buffer, cpu) = ibuffer;
}
- max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
+ max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size;
ds->pebs_buffer_base = (u64)(unsigned long)buffer;
ds->pebs_index = ds->pebs_buffer_base;
@@ -722,6 +731,30 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_bdw_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+ INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
+ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
+ /* Allow all events as PEBS with no flags */
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
+ EVENT_CONSTRAINT_END
+};
+
+
struct event_constraint intel_skl_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
@@ -1319,19 +1352,28 @@ void __init intel_ds_init(void)
x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
+ x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
if (x86_pmu.pebs) {
char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
int format = x86_pmu.intel_cap.pebs_format;
switch (format) {
case 0:
- printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
+ pr_cont("PEBS fmt0%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
+ /*
+ * Using >PAGE_SIZE buffers makes the WRMSR to
+ * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
+ * mysteriously hang on Core2.
+ *
+ * As a workaround, we don't do this.
+ */
+ x86_pmu.pebs_buffer_size = PAGE_SIZE;
x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
break;
case 1:
- printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
+ pr_cont("PEBS fmt1%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
break;
@@ -1351,7 +1393,7 @@ void __init intel_ds_init(void)
break;
default:
- printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
+ pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
x86_pmu.pebs = 0;
}
}
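
With pebs_buffer_size now a per-PMU field, the record capacity is just the quotient computed in alloc_pebs_buffer(). A worked example, assuming a 144-byte fmt0 record (the record size is an illustrative assumption, not taken from this diff):

#include <stdio.h>

int main(void)
{
	unsigned long pebs_buffer_size = 4096;	/* PAGE_SIZE, per the Core2 workaround */
	unsigned long pebs_record_size = 144;	/* assumed sizeof(struct pebs_record_core) */

	/* 4096 / 144 = 28 records before the buffer must be drained */
	printf("max PEBS records: %lu\n", pebs_buffer_size / pebs_record_size);
	return 0;
}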
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/events/intel/knc.c
index 5b0c232d1ee6..548d5f774b07 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/events/intel/knc.c
@@ -5,7 +5,7 @@
#include <asm/hardirq.h>
-#include "perf_event.h"
+#include "../perf_event.h"
static const u64 knc_perfmon_event_map[] =
{
@@ -263,7 +263,9 @@ again:
goto again;
done:
- knc_pmu_enable_all(0);
+ /* Only restore PMU state when it's active. See x86_pmu_disable(). */
+ if (cpuc->enabled)
+ knc_pmu_enable_all(0);
return handled;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/events/intel/lbr.c
index 653f88d25987..69dd11887dd1 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -5,7 +5,7 @@
#include <asm/msr.h>
#include <asm/insn.h>
-#include "perf_event.h"
+#include "../perf_event.h"
enum {
LBR_FORMAT_32 = 0x00,
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/events/intel/p4.c
index f2e56783af3d..0a5ede187d9c 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -13,7 +13,7 @@
#include <asm/hardirq.h>
#include <asm/apic.h>
-#include "perf_event.h"
+#include "../perf_event.h"
#define P4_CNTR_LIMIT 3
/*
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/events/intel/p6.c
index 7c1a0c07b607..1f5c47ab4c65 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/events/intel/p6.c
@@ -1,7 +1,7 @@
#include <linux/perf_event.h>
#include <linux/types.h>
-#include "perf_event.h"
+#include "../perf_event.h"
/*
* Not sure about some of these
diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/events/intel/pt.c
index c0bbd1033b7c..6af7cf71d6b2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -29,8 +29,8 @@
#include <asm/io.h>
#include <asm/intel_pt.h>
-#include "perf_event.h"
-#include "intel_pt.h"
+#include "../perf_event.h"
+#include "pt.h"
static DEFINE_PER_CPU(struct pt, pt_ctx);
diff --git a/arch/x86/kernel/cpu/intel_pt.h b/arch/x86/events/intel/pt.h
index 336878a5d205..336878a5d205 100644
--- a/arch/x86/kernel/cpu/intel_pt.h
+++ b/arch/x86/events/intel/pt.h
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/events/intel/rapl.c
index 24a351ad628d..b834a3f55a01 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -44,11 +44,14 @@
* the duration of the measurement. Tools may use a function such as
* ldexp(raw_count, -32);
*/
+
+#define pr_fmt(fmt) "RAPL PMU: " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
-#include "perf_event.h"
+#include "../perf_event.h"
/*
* RAPL energy status counters
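
The ldexp() hint in the header comment above is the entire user-space conversion; a minimal sketch with a made-up raw reading:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t raw_count = 0x900000000ULL;	/* made-up raw counter value */

	/* Counters report energy in 2^-32 Joule units */
	double joules = ldexp((double)raw_count, -32);

	printf("%.3f J\n", joules);		/* prints 9.000 J */
	return 0;
}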
@@ -107,7 +110,7 @@ static ssize_t __rapl_##_var##_show(struct kobject *kobj, \
static struct kobj_attribute format_attr_##_var = \
__ATTR(_name, 0444, __rapl_##_var##_show, NULL)
-#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */
+#define RAPL_CNTR_WIDTH 32
#define RAPL_EVENT_ATTR_STR(_name, v, str) \
static struct perf_pmu_events_attr event_attr_##v = { \
@@ -117,23 +120,33 @@ static struct perf_pmu_events_attr event_attr_##v = { \
};
struct rapl_pmu {
- spinlock_t lock;
- int n_active; /* number of active events */
- struct list_head active_list;
- struct pmu *pmu; /* pointer to rapl_pmu_class */
- ktime_t timer_interval; /* in ktime_t unit */
- struct hrtimer hrtimer;
+ raw_spinlock_t lock;
+ int n_active;
+ int cpu;
+ struct list_head active_list;
+ struct pmu *pmu;
+ ktime_t timer_interval;
+ struct hrtimer hrtimer;
};
-static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly; /* 1/2^hw_unit Joule */
-static struct pmu rapl_pmu_class;
+struct rapl_pmus {
+ struct pmu pmu;
+ unsigned int maxpkg;
+ struct rapl_pmu *pmus[];
+};
+
+ /* 1/2^hw_unit Joule */
+static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
+static struct rapl_pmus *rapl_pmus;
static cpumask_t rapl_cpu_mask;
-static int rapl_cntr_mask;
+static unsigned int rapl_cntr_mask;
+static u64 rapl_timer_ms;
-static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu);
-static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu_to_free);
+static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
+{
+ return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+}
-static struct x86_pmu_quirk *rapl_quirks;
static inline u64 rapl_read_counter(struct perf_event *event)
{
u64 raw;
@@ -141,19 +154,10 @@ static inline u64 rapl_read_counter(struct perf_event *event)
return raw;
}
-#define rapl_add_quirk(func_) \
-do { \
- static struct x86_pmu_quirk __quirk __initdata = { \
- .func = func_, \
- }; \
- __quirk.next = rapl_quirks; \
- rapl_quirks = &__quirk; \
-} while (0)
-
static inline u64 rapl_scale(u64 v, int cfg)
{
if (cfg > NR_RAPL_DOMAINS) {
- pr_warn("invalid domain %d, failed to scale data\n", cfg);
+ pr_warn("Invalid domain %d, failed to scale data\n", cfg);
return v;
}
/*
@@ -206,27 +210,21 @@ static void rapl_start_hrtimer(struct rapl_pmu *pmu)
HRTIMER_MODE_REL_PINNED);
}
-static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
-{
- hrtimer_cancel(&pmu->hrtimer);
-}
-
static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
- struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+ struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
struct perf_event *event;
unsigned long flags;
if (!pmu->n_active)
return HRTIMER_NORESTART;
- spin_lock_irqsave(&pmu->lock, flags);
+ raw_spin_lock_irqsave(&pmu->lock, flags);
- list_for_each_entry(event, &pmu->active_list, active_entry) {
+ list_for_each_entry(event, &pmu->active_list, active_entry)
rapl_event_update(event);
- }
- spin_unlock_irqrestore(&pmu->lock, flags);
+ raw_spin_unlock_irqrestore(&pmu->lock, flags);
hrtimer_forward_now(hrtimer, pmu->timer_interval);
@@ -260,28 +258,28 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
- struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+ struct rapl_pmu *pmu = event->pmu_private;
unsigned long flags;
- spin_lock_irqsave(&pmu->lock, flags);
+ raw_spin_lock_irqsave(&pmu->lock, flags);
__rapl_pmu_event_start(pmu, event);
- spin_unlock_irqrestore(&pmu->lock, flags);
+ raw_spin_unlock_irqrestore(&pmu->lock, flags);
}
static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
- struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+ struct rapl_pmu *pmu = event->pmu_private;
struct hw_perf_event *hwc = &event->hw;
unsigned long flags;
- spin_lock_irqsave(&pmu->lock, flags);
+ raw_spin_lock_irqsave(&pmu->lock, flags);
/* mark event as deactivated and stopped */
if (!(hwc->state & PERF_HES_STOPPED)) {
WARN_ON_ONCE(pmu->n_active <= 0);
pmu->n_active--;
if (pmu->n_active == 0)
- rapl_stop_hrtimer(pmu);
+ hrtimer_cancel(&pmu->hrtimer);
list_del(&event->active_entry);
@@ -299,23 +297,23 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
hwc->state |= PERF_HES_UPTODATE;
}
- spin_unlock_irqrestore(&pmu->lock, flags);
+ raw_spin_unlock_irqrestore(&pmu->lock, flags);
}
static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
- struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+ struct rapl_pmu *pmu = event->pmu_private;
struct hw_perf_event *hwc = &event->hw;
unsigned long flags;
- spin_lock_irqsave(&pmu->lock, flags);
+ raw_spin_lock_irqsave(&pmu->lock, flags);
hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (mode & PERF_EF_START)
__rapl_pmu_event_start(pmu, event);
- spin_unlock_irqrestore(&pmu->lock, flags);
+ raw_spin_unlock_irqrestore(&pmu->lock, flags);
return 0;
}
@@ -329,15 +327,19 @@ static int rapl_pmu_event_init(struct perf_event *event)
{
u64 cfg = event->attr.config & RAPL_EVENT_MASK;
int bit, msr, ret = 0;
+ struct rapl_pmu *pmu;
/* only look at RAPL events */
- if (event->attr.type != rapl_pmu_class.type)
+ if (event->attr.type != rapl_pmus->pmu.type)
return -ENOENT;
/* check only supported bits are set */
if (event->attr.config & ~RAPL_EVENT_MASK)
return -EINVAL;
+ if (event->cpu < 0)
+ return -EINVAL;
+
/*
* check event is known (determines counter)
*/
@@ -376,6 +378,9 @@ static int rapl_pmu_event_init(struct perf_event *event)
return -EINVAL;
/* must be done before validate_group */
+ pmu = cpu_to_rapl_pmu(event->cpu);
+ event->cpu = pmu->cpu;
+ event->pmu_private = pmu;
event->hw.event_base = msr;
event->hw.config = cfg;
event->hw.idx = bit;
@@ -506,139 +511,62 @@ const struct attribute_group *rapl_attr_groups[] = {
NULL,
};
-static struct pmu rapl_pmu_class = {
- .attr_groups = rapl_attr_groups,
- .task_ctx_nr = perf_invalid_context, /* system-wide only */
- .event_init = rapl_pmu_event_init,
- .add = rapl_pmu_event_add, /* must have */
- .del = rapl_pmu_event_del, /* must have */
- .start = rapl_pmu_event_start,
- .stop = rapl_pmu_event_stop,
- .read = rapl_pmu_event_read,
-};
-
static void rapl_cpu_exit(int cpu)
{
- struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
- int i, phys_id = topology_physical_package_id(cpu);
- int target = -1;
+ struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
+ int target;
- /* find a new cpu on same package */
- for_each_online_cpu(i) {
- if (i == cpu)
- continue;
- if (phys_id == topology_physical_package_id(i)) {
- target = i;
- break;
- }
- }
- /*
- * clear cpu from cpumask
- * if was set in cpumask and still some cpu on package,
- * then move to new cpu
- */
- if (cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask) && target >= 0)
- cpumask_set_cpu(target, &rapl_cpu_mask);
+ /* Check if exiting cpu is used for collecting rapl events */
+ if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
+ return;
- WARN_ON(cpumask_empty(&rapl_cpu_mask));
- /*
- * migrate events and context to new cpu
- */
- if (target >= 0)
- perf_pmu_migrate_context(pmu->pmu, cpu, target);
+ pmu->cpu = -1;
+ /* Find a new cpu to collect rapl events */
+ target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
- /* cancel overflow polling timer for CPU */
- rapl_stop_hrtimer(pmu);
+ /* Migrate rapl events to the new target */
+ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &rapl_cpu_mask);
+ pmu->cpu = target;
+ perf_pmu_migrate_context(pmu->pmu, cpu, target);
+ }
}
static void rapl_cpu_init(int cpu)
{
- int i, phys_id = topology_physical_package_id(cpu);
+ struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
+ int target;
- /* check if phys_is is already covered */
- for_each_cpu(i, &rapl_cpu_mask) {
- if (phys_id == topology_physical_package_id(i))
- return;
- }
- /* was not found, so add it */
- cpumask_set_cpu(cpu, &rapl_cpu_mask);
-}
-
-static __init void rapl_hsw_server_quirk(void)
-{
/*
- * DRAM domain on HSW server has fixed energy unit which can be
- * different than the unit from power unit MSR.
- * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
- * of 2. Datasheet, September 2014, Reference Number: 330784-001 "
+ * Check if there is an online cpu in the package which collects rapl
+ * events already.
*/
- rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;
+ target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
+ if (target < nr_cpu_ids)
+ return;
+
+ cpumask_set_cpu(cpu, &rapl_cpu_mask);
+ pmu->cpu = cpu;
}
static int rapl_cpu_prepare(int cpu)
{
- struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
- int phys_id = topology_physical_package_id(cpu);
- u64 ms;
+ struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
if (pmu)
return 0;
- if (phys_id < 0)
- return -1;
-
pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
if (!pmu)
- return -1;
- spin_lock_init(&pmu->lock);
+ return -ENOMEM;
+ raw_spin_lock_init(&pmu->lock);
INIT_LIST_HEAD(&pmu->active_list);
-
- pmu->pmu = &rapl_pmu_class;
-
- /*
- * use reference of 200W for scaling the timeout
- * to avoid missing counter overflows.
- * 200W = 200 Joules/sec
- * divide interval by 2 to avoid lockstep (2 * 100)
- * if hw unit is 32, then we use 2 ms 1/200/2
- */
- if (rapl_hw_unit[0] < 32)
- ms = (1000 / (2 * 100)) * (1ULL << (32 - rapl_hw_unit[0] - 1));
- else
- ms = 2;
-
- pmu->timer_interval = ms_to_ktime(ms);
-
+ pmu->pmu = &rapl_pmus->pmu;
+ pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+ pmu->cpu = -1;
rapl_hrtimer_init(pmu);
-
- /* set RAPL pmu for this cpu for now */
- per_cpu(rapl_pmu, cpu) = pmu;
- per_cpu(rapl_pmu_to_free, cpu) = NULL;
-
- return 0;
-}
-
-static void rapl_cpu_kfree(int cpu)
-{
- struct rapl_pmu *pmu = per_cpu(rapl_pmu_to_free, cpu);
-
- kfree(pmu);
-
- per_cpu(rapl_pmu_to_free, cpu) = NULL;
-}
-
-static int rapl_cpu_dying(int cpu)
-{
- struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
-
- if (!pmu)
- return 0;
-
- per_cpu(rapl_pmu, cpu) = NULL;
-
- per_cpu(rapl_pmu_to_free, cpu) = pmu;
-
+ rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
return 0;
}
@@ -651,28 +579,20 @@ static int rapl_cpu_notifier(struct notifier_block *self,
case CPU_UP_PREPARE:
rapl_cpu_prepare(cpu);
break;
- case CPU_STARTING:
- rapl_cpu_init(cpu);
- break;
- case CPU_UP_CANCELED:
- case CPU_DYING:
- rapl_cpu_dying(cpu);
- break;
+
+ case CPU_DOWN_FAILED:
case CPU_ONLINE:
- case CPU_DEAD:
- rapl_cpu_kfree(cpu);
+ rapl_cpu_init(cpu);
break;
+
case CPU_DOWN_PREPARE:
rapl_cpu_exit(cpu);
break;
- default:
- break;
}
-
return NOTIFY_OK;
}
-static int rapl_check_hw_unit(void)
+static int rapl_check_hw_unit(bool apply_quirk)
{
u64 msr_rapl_power_unit_bits;
int i;
@@ -683,28 +603,107 @@ static int rapl_check_hw_unit(void)
for (i = 0; i < NR_RAPL_DOMAINS; i++)
rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
+ /*
+ * DRAM domain on HSW server and KNL has fixed energy unit which can be
+ * different than the unit from power unit MSR. See
+ * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
+ * of 2. Datasheet, September 2014, Reference Number: 330784-001 "
+ */
+ if (apply_quirk)
+ rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;
+
+ /*
+ * Calculate the timer rate:
+ * Use reference of 200W for scaling the timeout to avoid counter
+ * overflows. 200W = 200 Joules/sec
+ * Divide interval by 2 to avoid lockstep (2 * 100)
+ * if hw unit is 32, then we use 2 ms 1/200/2
+ */
+ rapl_timer_ms = 2;
+ if (rapl_hw_unit[0] < 32) {
+ rapl_timer_ms = (1000 / (2 * 100));
+ rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
+ }
+ return 0;
+}
+
+static void __init rapl_advertise(void)
+{
+ int i;
+
+ pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
+ hweight32(rapl_cntr_mask), rapl_timer_ms);
+
+ for (i = 0; i < NR_RAPL_DOMAINS; i++) {
+ if (rapl_cntr_mask & (1 << i)) {
+ pr_info("hw unit of domain %s 2^-%d Joules\n",
+ rapl_domain_names[i], rapl_hw_unit[i]);
+ }
+ }
+}
+
+static int __init rapl_prepare_cpus(void)
+{
+ unsigned int cpu, pkg;
+ int ret;
+
+ for_each_online_cpu(cpu) {
+ pkg = topology_logical_package_id(cpu);
+ if (rapl_pmus->pmus[pkg])
+ continue;
+
+ ret = rapl_cpu_prepare(cpu);
+ if (ret)
+ return ret;
+ rapl_cpu_init(cpu);
+ }
+ return 0;
+}
+
+static void __init cleanup_rapl_pmus(void)
+{
+ int i;
+
+ for (i = 0; i < rapl_pmus->maxpkg; i++)
+ kfree(rapl_pmus->pmus[i]);
+ kfree(rapl_pmus);
+}
+
+static int __init init_rapl_pmus(void)
+{
+ int maxpkg = topology_max_packages();
+ size_t size;
+
+ size = sizeof(*rapl_pmus) + maxpkg * sizeof(struct rapl_pmu *);
+ rapl_pmus = kzalloc(size, GFP_KERNEL);
+ if (!rapl_pmus)
+ return -ENOMEM;
+
+ rapl_pmus->maxpkg = maxpkg;
+ rapl_pmus->pmu.attr_groups = rapl_attr_groups;
+ rapl_pmus->pmu.task_ctx_nr = perf_invalid_context;
+ rapl_pmus->pmu.event_init = rapl_pmu_event_init;
+ rapl_pmus->pmu.add = rapl_pmu_event_add;
+ rapl_pmus->pmu.del = rapl_pmu_event_del;
+ rapl_pmus->pmu.start = rapl_pmu_event_start;
+ rapl_pmus->pmu.stop = rapl_pmu_event_stop;
+ rapl_pmus->pmu.read = rapl_pmu_event_read;
return 0;
}
-static const struct x86_cpu_id rapl_cpu_match[] = {
+static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
[0] = { .vendor = X86_VENDOR_INTEL, .family = 6 },
[1] = {},
};
static int __init rapl_pmu_init(void)
{
- struct rapl_pmu *pmu;
- int cpu, ret;
- struct x86_pmu_quirk *quirk;
- int i;
+ bool apply_quirk = false;
+ int ret;
- /*
- * check for Intel processor family 6
- */
if (!x86_match_cpu(rapl_cpu_match))
- return 0;
+ return -ENODEV;
- /* check supported CPU */
switch (boot_cpu_data.x86_model) {
case 42: /* Sandy Bridge */
case 58: /* Ivy Bridge */
@@ -712,7 +711,7 @@ static int __init rapl_pmu_init(void)
rapl_pmu_events_group.attrs = rapl_events_cln_attr;
break;
case 63: /* Haswell-Server */
- rapl_add_quirk(rapl_hsw_server_quirk);
+ apply_quirk = true;
rapl_cntr_mask = RAPL_IDX_SRV;
rapl_pmu_events_group.attrs = rapl_events_srv_attr;
break;
@@ -728,56 +727,41 @@ static int __init rapl_pmu_init(void)
rapl_pmu_events_group.attrs = rapl_events_srv_attr;
break;
case 87: /* Knights Landing */
- rapl_add_quirk(rapl_hsw_server_quirk);
+ apply_quirk = true;
rapl_cntr_mask = RAPL_IDX_KNL;
rapl_pmu_events_group.attrs = rapl_events_knl_attr;
-
+ break;
default:
- /* unsupported */
- return 0;
+ return -ENODEV;
}
- ret = rapl_check_hw_unit();
+
+ ret = rapl_check_hw_unit(apply_quirk);
if (ret)
return ret;
- /* run cpu model quirks */
- for (quirk = rapl_quirks; quirk; quirk = quirk->next)
- quirk->func();
- cpu_notifier_register_begin();
+ ret = init_rapl_pmus();
+ if (ret)
+ return ret;
- for_each_online_cpu(cpu) {
- ret = rapl_cpu_prepare(cpu);
- if (ret)
- goto out;
- rapl_cpu_init(cpu);
- }
+ cpu_notifier_register_begin();
- __perf_cpu_notifier(rapl_cpu_notifier);
+ ret = rapl_prepare_cpus();
+ if (ret)
+ goto out;
- ret = perf_pmu_register(&rapl_pmu_class, "power", -1);
- if (WARN_ON(ret)) {
- pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret);
- cpu_notifier_register_done();
- return -1;
- }
+ ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
+ if (ret)
+ goto out;
- pmu = __this_cpu_read(rapl_pmu);
+ __perf_cpu_notifier(rapl_cpu_notifier);
+ cpu_notifier_register_done();
+ rapl_advertise();
+ return 0;
- pr_info("RAPL PMU detected,"
- " API unit is 2^-32 Joules,"
- " %d fixed counters"
- " %llu ms ovfl timer\n",
- hweight32(rapl_cntr_mask),
- ktime_to_ms(pmu->timer_interval));
- for (i = 0; i < NR_RAPL_DOMAINS; i++) {
- if (rapl_cntr_mask & (1 << i)) {
- pr_info("hw unit of domain %s 2^-%d Joules\n",
- rapl_domain_names[i], rapl_hw_unit[i]);
- }
- }
out:
+ pr_warn("Initialization failed (%d), disabled\n", ret);
+ cleanup_rapl_pmus();
cpu_notifier_register_done();
-
- return 0;
+ return ret;
}
device_initcall(rapl_pmu_init);
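
The overflow-timer arithmetic folded into rapl_check_hw_unit() above is easy to check by hand; a standalone sketch assuming a typical 2^-16 J energy unit (the unit value is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int hw_unit = 16;		/* assumed energy-status unit */
	unsigned long long ms = 2;		/* default when the unit is >= 32 */

	if (hw_unit < 32) {
		ms = 1000 / (2 * 100);		/* 200 W reference, halved for lockstep: 5 */
		ms *= 1ULL << (32 - hw_unit - 1);
	}
	/* 5 * 2^15 = 163840 ms between overflow checks */
	printf("rapl_timer_ms = %llu\n", ms);
	return 0;
}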
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/events/intel/uncore.c
index 3bf41d413775..7012d18bb293 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1,4 +1,4 @@
-#include "perf_event_intel_uncore.h"
+#include "uncore.h"
static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
@@ -9,9 +9,9 @@ struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
-struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
+struct pci_extra_dev *uncore_extra_pci_dev;
+static int max_packages;
-static DEFINE_RAW_SPINLOCK(uncore_box_lock);
/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;
@@ -21,7 +21,7 @@ static struct event_constraint uncore_constraint_fixed =
struct event_constraint uncore_constraint_empty =
EVENT_CONSTRAINT(0, 0, 0);
-int uncore_pcibus_to_physid(struct pci_bus *bus)
+static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
struct pci2phy_map *map;
int phys_id = -1;
@@ -38,6 +38,16 @@ int uncore_pcibus_to_physid(struct pci_bus *bus)
return phys_id;
}
+static void uncore_free_pcibus_map(void)
+{
+ struct pci2phy_map *map, *tmp;
+
+ list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
+ list_del(&map->list);
+ kfree(map);
+ }
+}
+
struct pci2phy_map *__find_pci2phy_map(int segment)
{
struct pci2phy_map *map, *alloc = NULL;
@@ -82,43 +92,9 @@ ssize_t uncore_event_show(struct kobject *kobj,
return sprintf(buf, "%s", event->config);
}
-struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
-{
- return container_of(event->pmu, struct intel_uncore_pmu, pmu);
-}
-
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
- struct intel_uncore_box *box;
-
- box = *per_cpu_ptr(pmu->box, cpu);
- if (box)
- return box;
-
- raw_spin_lock(&uncore_box_lock);
- /* Recheck in lock to handle races. */
- if (*per_cpu_ptr(pmu->box, cpu))
- goto out;
- list_for_each_entry(box, &pmu->box_list, list) {
- if (box->phys_id == topology_physical_package_id(cpu)) {
- atomic_inc(&box->refcnt);
- *per_cpu_ptr(pmu->box, cpu) = box;
- break;
- }
- }
-out:
- raw_spin_unlock(&uncore_box_lock);
-
- return *per_cpu_ptr(pmu->box, cpu);
-}
-
-struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
-{
- /*
- * perf core schedules event on the basis of cpu, uncore events are
- * collected by one of the cpus inside a physical package.
- */
- return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
+ return pmu->boxes[topology_logical_package_id(cpu)];
}
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -207,7 +183,8 @@ u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
return config;
}
-static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
+static void uncore_assign_hw_event(struct intel_uncore_box *box,
+ struct perf_event *event, int idx)
{
struct hw_perf_event *hwc = &event->hw;
@@ -302,24 +279,25 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
box->hrtimer.function = uncore_pmu_hrtimer;
}
-static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
+static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
+ int node)
{
+ int i, size, numshared = type->num_shared_regs;
struct intel_uncore_box *box;
- int i, size;
- size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
+ size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);
box = kzalloc_node(size, GFP_KERNEL, node);
if (!box)
return NULL;
- for (i = 0; i < type->num_shared_regs; i++)
+ for (i = 0; i < numshared; i++)
raw_spin_lock_init(&box->shared_regs[i].lock);
uncore_pmu_init_hrtimer(box);
- atomic_set(&box->refcnt, 1);
box->cpu = -1;
- box->phys_id = -1;
+ box->pci_phys_id = -1;
+ box->pkgid = -1;
/* set default hrtimer timeout */
box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
@@ -341,7 +319,8 @@ static bool is_uncore_event(struct perf_event *event)
}
static int
-uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
+uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
+ bool dogrp)
{
struct perf_event *event;
int n, max_count;
@@ -402,7 +381,8 @@ uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *eve
return &type->unconstrainted;
}
-static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
+static void uncore_put_event_constraint(struct intel_uncore_box *box,
+ struct perf_event *event)
{
if (box->pmu->type->ops->put_constraint)
box->pmu->type->ops->put_constraint(box, event);
@@ -582,7 +562,7 @@ static void uncore_pmu_event_del(struct perf_event *event, int flags)
if (event == box->event_list[i]) {
uncore_put_event_constraint(box, event);
- while (++i < box->n_events)
+ for (++i; i < box->n_events; i++)
box->event_list[i - 1] = box->event_list[i];
--box->n_events;
@@ -676,6 +656,7 @@ static int uncore_pmu_event_init(struct perf_event *event)
if (!box || box->cpu < 0)
return -EINVAL;
event->cpu = box->cpu;
+ event->pmu_private = box;
event->hw.idx = -1;
event->hw.last_tag = ~0ULL;
@@ -760,64 +741,110 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
}
ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
+ if (!ret)
+ pmu->registered = true;
return ret;
}
+static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
+{
+ if (!pmu->registered)
+ return;
+ perf_pmu_unregister(&pmu->pmu);
+ pmu->registered = false;
+}
+
+static void __init __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
+{
+ struct intel_uncore_pmu *pmu = type->pmus;
+ struct intel_uncore_box *box;
+ int i, pkg;
+
+ if (pmu) {
+ pkg = topology_physical_package_id(cpu);
+ for (i = 0; i < type->num_boxes; i++, pmu++) {
+ box = pmu->boxes[pkg];
+ if (box)
+ uncore_box_exit(box);
+ }
+ }
+}
+
+static void __init uncore_exit_boxes(void *dummy)
+{
+ struct intel_uncore_type **types;
+
+ for (types = uncore_msr_uncores; *types; types++)
+ __uncore_exit_boxes(*types, smp_processor_id());
+}
+
+static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
+{
+ int pkg;
+
+ for (pkg = 0; pkg < max_packages; pkg++)
+ kfree(pmu->boxes[pkg]);
+ kfree(pmu->boxes);
+}
+
static void __init uncore_type_exit(struct intel_uncore_type *type)
{
+ struct intel_uncore_pmu *pmu = type->pmus;
int i;
- for (i = 0; i < type->num_boxes; i++)
- free_percpu(type->pmus[i].box);
- kfree(type->pmus);
- type->pmus = NULL;
+ if (pmu) {
+ for (i = 0; i < type->num_boxes; i++, pmu++) {
+ uncore_pmu_unregister(pmu);
+ uncore_free_boxes(pmu);
+ }
+ kfree(type->pmus);
+ type->pmus = NULL;
+ }
kfree(type->events_group);
type->events_group = NULL;
}
static void __init uncore_types_exit(struct intel_uncore_type **types)
{
- int i;
- for (i = 0; types[i]; i++)
- uncore_type_exit(types[i]);
+ for (; *types; types++)
+ uncore_type_exit(*types);
}
-static int __init uncore_type_init(struct intel_uncore_type *type)
+static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
struct intel_uncore_pmu *pmus;
struct attribute_group *attr_group;
struct attribute **attrs;
+ size_t size;
int i, j;
pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
if (!pmus)
return -ENOMEM;
- type->pmus = pmus;
+ size = max_packages * sizeof(struct intel_uncore_box *);
+ for (i = 0; i < type->num_boxes; i++) {
+ pmus[i].func_id = setid ? i : -1;
+ pmus[i].pmu_idx = i;
+ pmus[i].type = type;
+ pmus[i].boxes = kzalloc(size, GFP_KERNEL);
+ if (!pmus[i].boxes)
+ return -ENOMEM;
+ }
+
+ type->pmus = pmus;
type->unconstrainted = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
0, type->num_counters, 0, 0);
- for (i = 0; i < type->num_boxes; i++) {
- pmus[i].func_id = -1;
- pmus[i].pmu_idx = i;
- pmus[i].type = type;
- INIT_LIST_HEAD(&pmus[i].box_list);
- pmus[i].box = alloc_percpu(struct intel_uncore_box *);
- if (!pmus[i].box)
- goto fail;
- }
-
if (type->event_descs) {
- i = 0;
- while (type->event_descs[i].attr.attr.name)
- i++;
+ for (i = 0; type->event_descs[i].attr.attr.name; i++);
attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
sizeof(*attr_group), GFP_KERNEL);
if (!attr_group)
- goto fail;
+ return -ENOMEM;
attrs = (struct attribute **)(attr_group + 1);
attr_group->name = "events";
@@ -831,25 +858,19 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
type->pmu_group = &uncore_pmu_attr_group;
return 0;
-fail:
- uncore_type_exit(type);
- return -ENOMEM;
}
-static int __init uncore_types_init(struct intel_uncore_type **types)
+static int __init
+uncore_types_init(struct intel_uncore_type **types, bool setid)
{
- int i, ret;
+ int ret;
- for (i = 0; types[i]; i++) {
- ret = uncore_type_init(types[i]);
+ for (; *types; types++) {
+ ret = uncore_type_init(*types, setid);
if (ret)
- goto fail;
+ return ret;
}
return 0;
-fail:
- while (--i >= 0)
- uncore_type_exit(types[i]);
- return ret;
}
/*
@@ -857,28 +878,28 @@ fail:
*/
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct intel_uncore_type *type;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
- struct intel_uncore_type *type;
- int phys_id;
- bool first_box = false;
+ int phys_id, pkg, ret;
phys_id = uncore_pcibus_to_physid(pdev->bus);
if (phys_id < 0)
return -ENODEV;
+ pkg = topology_phys_to_logical_pkg(phys_id);
+ if (WARN_ON_ONCE(pkg < 0))
+ return -EINVAL;
+
if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
- uncore_extra_pci_dev[phys_id][idx] = pdev;
+
+ uncore_extra_pci_dev[pkg].dev[idx] = pdev;
pci_set_drvdata(pdev, NULL);
return 0;
}
type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
- box = uncore_alloc_box(type, NUMA_NO_NODE);
- if (!box)
- return -ENOMEM;
-
/*
* for performance monitoring unit with multiple boxes,
* each box has a different function id.
@@ -890,44 +911,60 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
* some device types. Hence PCI device idx would be 0 for all devices.
* So increment pmu pointer to point to an unused array element.
*/
- if (boot_cpu_data.x86_model == 87)
+ if (boot_cpu_data.x86_model == 87) {
while (pmu->func_id >= 0)
pmu++;
+ }
+
+ if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
+ return -EINVAL;
+
+ box = uncore_alloc_box(type, NUMA_NO_NODE);
+ if (!box)
+ return -ENOMEM;
+
if (pmu->func_id < 0)
pmu->func_id = pdev->devfn;
else
WARN_ON_ONCE(pmu->func_id != pdev->devfn);
- box->phys_id = phys_id;
+ atomic_inc(&box->refcnt);
+ box->pci_phys_id = phys_id;
+ box->pkgid = pkg;
box->pci_dev = pdev;
box->pmu = pmu;
uncore_box_init(box);
pci_set_drvdata(pdev, box);
- raw_spin_lock(&uncore_box_lock);
- if (list_empty(&pmu->box_list))
- first_box = true;
- list_add_tail(&box->list, &pmu->box_list);
- raw_spin_unlock(&uncore_box_lock);
+ pmu->boxes[pkg] = box;
+ if (atomic_inc_return(&pmu->activeboxes) > 1)
+ return 0;
- if (first_box)
- uncore_pmu_register(pmu);
- return 0;
+ /* First active box registers the pmu */
+ ret = uncore_pmu_register(pmu);
+ if (ret) {
+ pci_set_drvdata(pdev, NULL);
+ pmu->boxes[pkg] = NULL;
+ uncore_box_exit(box);
+ kfree(box);
+ }
+ return ret;
}
static void uncore_pci_remove(struct pci_dev *pdev)
{
struct intel_uncore_box *box = pci_get_drvdata(pdev);
struct intel_uncore_pmu *pmu;
- int i, cpu, phys_id;
- bool last_box = false;
+ int i, phys_id, pkg;
phys_id = uncore_pcibus_to_physid(pdev->bus);
+ pkg = topology_phys_to_logical_pkg(phys_id);
+
box = pci_get_drvdata(pdev);
if (!box) {
for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
- if (uncore_extra_pci_dev[phys_id][i] == pdev) {
- uncore_extra_pci_dev[phys_id][i] = NULL;
+ if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
+ uncore_extra_pci_dev[pkg].dev[i] = NULL;
break;
}
}
@@ -936,33 +973,20 @@ static void uncore_pci_remove(struct pci_dev *pdev)
}
pmu = box->pmu;
- if (WARN_ON_ONCE(phys_id != box->phys_id))
+ if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
return;
pci_set_drvdata(pdev, NULL);
-
- raw_spin_lock(&uncore_box_lock);
- list_del(&box->list);
- if (list_empty(&pmu->box_list))
- last_box = true;
- raw_spin_unlock(&uncore_box_lock);
-
- for_each_possible_cpu(cpu) {
- if (*per_cpu_ptr(pmu->box, cpu) == box) {
- *per_cpu_ptr(pmu->box, cpu) = NULL;
- atomic_dec(&box->refcnt);
- }
- }
-
- WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
+ pmu->boxes[pkg] = NULL;
+ if (atomic_dec_return(&pmu->activeboxes) == 0)
+ uncore_pmu_unregister(pmu);
+ uncore_box_exit(box);
kfree(box);
-
- if (last_box)
- perf_pmu_unregister(&pmu->pmu);
}
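
Both PCI paths above hang PMU registration off a plain active-box count instead of the old box_list plus spinlock. A userspace model of that first-in/last-out scheme, assuming C11 atomics stand in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int activeboxes;

/* First box brought up registers the pmu (mirrors atomic_inc_return > 1) */
static int box_added_registers_pmu(void)
{
	return atomic_fetch_add(&activeboxes, 1) + 1 == 1;
}

/* Last box torn down unregisters it (mirrors atomic_dec_return == 0) */
static int box_removed_unregisters_pmu(void)
{
	return atomic_fetch_sub(&activeboxes, 1) - 1 == 0;
}

int main(void)
{
	printf("adds register:      %d %d\n", box_added_registers_pmu(),
	       box_added_registers_pmu());		/* 1 0 */
	printf("removes unregister: %d %d\n", box_removed_unregisters_pmu(),
	       box_removed_unregisters_pmu());		/* 0 1 */
	return 0;
}
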
static int __init uncore_pci_init(void)
{
+ size_t size;
int ret;
switch (boot_cpu_data.x86_model) {
@@ -999,25 +1023,40 @@ static int __init uncore_pci_init(void)
ret = skl_uncore_pci_init();
break;
default:
- return 0;
+ return -ENODEV;
}
if (ret)
return ret;
- ret = uncore_types_init(uncore_pci_uncores);
+ size = max_packages * sizeof(struct pci_extra_dev);
+ uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
+ if (!uncore_extra_pci_dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = uncore_types_init(uncore_pci_uncores, false);
if (ret)
- return ret;
+ goto errtype;
uncore_pci_driver->probe = uncore_pci_probe;
uncore_pci_driver->remove = uncore_pci_remove;
ret = pci_register_driver(uncore_pci_driver);
- if (ret == 0)
- pcidrv_registered = true;
- else
- uncore_types_exit(uncore_pci_uncores);
+ if (ret)
+ goto errtype;
+
+ pcidrv_registered = true;
+ return 0;
+errtype:
+ uncore_types_exit(uncore_pci_uncores);
+ kfree(uncore_extra_pci_dev);
+ uncore_extra_pci_dev = NULL;
+ uncore_free_pcibus_map();
+err:
+ uncore_pci_uncores = empty_uncore;
return ret;
}
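
uncore_pci_init() now unwinds through the usual stacked-goto pattern: resources acquired in order, released in reverse from the label matching the first failure. A compact sketch with stand-in names (init_a/init_b/exit_a are hypothetical):

#include <stdio.h>

static int init_a(void) { return 0; }
static void exit_a(void) { puts("undo a"); }
static int init_b(void) { return -1; }	/* force the failure path */

static int setup(void)
{
	int ret = init_a();

	if (ret)
		goto err;
	ret = init_b();
	if (ret)
		goto err_a;
	return 0;
err_a:
	exit_a();	/* only what was acquired before the failure */
err:
	return ret;
}

int main(void)
{
	printf("setup: %d\n", setup());
	return 0;
}
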
@@ -1027,173 +1066,139 @@ static void __init uncore_pci_exit(void)
pcidrv_registered = false;
pci_unregister_driver(uncore_pci_driver);
uncore_types_exit(uncore_pci_uncores);
- }
-}
-
-/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
-static LIST_HEAD(boxes_to_free);
-
-static void uncore_kfree_boxes(void)
-{
- struct intel_uncore_box *box;
-
- while (!list_empty(&boxes_to_free)) {
- box = list_entry(boxes_to_free.next,
- struct intel_uncore_box, list);
- list_del(&box->list);
- kfree(box);
+ kfree(uncore_extra_pci_dev);
+ uncore_free_pcibus_map();
}
}
static void uncore_cpu_dying(int cpu)
{
- struct intel_uncore_type *type;
+ struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
- int i, j;
-
- for (i = 0; uncore_msr_uncores[i]; i++) {
- type = uncore_msr_uncores[i];
- for (j = 0; j < type->num_boxes; j++) {
- pmu = &type->pmus[j];
- box = *per_cpu_ptr(pmu->box, cpu);
- *per_cpu_ptr(pmu->box, cpu) = NULL;
- if (box && atomic_dec_and_test(&box->refcnt))
- list_add(&box->list, &boxes_to_free);
+ int i, pkg;
+
+ pkg = topology_logical_package_id(cpu);
+ for (; *types; types++) {
+ type = *types;
+ pmu = type->pmus;
+ for (i = 0; i < type->num_boxes; i++, pmu++) {
+ box = pmu->boxes[pkg];
+ if (box && atomic_dec_return(&box->refcnt) == 0)
+ uncore_box_exit(box);
}
}
}
-static int uncore_cpu_starting(int cpu)
+static void uncore_cpu_starting(int cpu, bool init)
{
- struct intel_uncore_type *type;
+ struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
- struct intel_uncore_box *box, *exist;
- int i, j, k, phys_id;
-
- phys_id = topology_physical_package_id(cpu);
-
- for (i = 0; uncore_msr_uncores[i]; i++) {
- type = uncore_msr_uncores[i];
- for (j = 0; j < type->num_boxes; j++) {
- pmu = &type->pmus[j];
- box = *per_cpu_ptr(pmu->box, cpu);
- /* called by uncore_cpu_init? */
- if (box && box->phys_id >= 0) {
- uncore_box_init(box);
- continue;
- }
+ struct intel_uncore_box *box;
+ int i, pkg, ncpus = 1;
- for_each_online_cpu(k) {
- exist = *per_cpu_ptr(pmu->box, k);
- if (exist && exist->phys_id == phys_id) {
- atomic_inc(&exist->refcnt);
- *per_cpu_ptr(pmu->box, cpu) = exist;
- if (box) {
- list_add(&box->list,
- &boxes_to_free);
- box = NULL;
- }
- break;
- }
- }
+ if (init) {
+ /*
+ * On init we get the number of online cpus in the package
+ * and take one reference for each of them.
+ */
+ ncpus = cpumask_weight(topology_core_cpumask(cpu));
+ }
- if (box) {
- box->phys_id = phys_id;
+ pkg = topology_logical_package_id(cpu);
+ for (; *types; types++) {
+ type = *types;
+ pmu = type->pmus;
+ for (i = 0; i < type->num_boxes; i++, pmu++) {
+ box = pmu->boxes[pkg];
+ if (!box)
+ continue;
+ /* The first cpu on a package activates the box */
+ if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
uncore_box_init(box);
- }
}
}
- return 0;
}
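
The init/hotplug split above seeds the box refcount differently: at init time the first cpu of a package takes one reference per online sibling in one shot, while a hotplugged cpu adds a single reference. A toy model of the atomic_add_return() test:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcnt;

/* Whoever moves the count from 0 to exactly ncpus initializes the box;
 * every later caller just bumps the count. */
static int box_ref_inits(int ncpus)
{
	return atomic_fetch_add(&refcnt, ncpus) + ncpus == ncpus;
}

int main(void)
{
	printf("boot cpu (4 siblings) inits: %d\n", box_ref_inits(4)); /* 1 */
	printf("hotplugged cpu inits:        %d\n", box_ref_inits(1)); /* 0 */
	return 0;
}
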
-static int uncore_cpu_prepare(int cpu, int phys_id)
+static int uncore_cpu_prepare(int cpu)
{
- struct intel_uncore_type *type;
+ struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
- int i, j;
-
- for (i = 0; uncore_msr_uncores[i]; i++) {
- type = uncore_msr_uncores[i];
- for (j = 0; j < type->num_boxes; j++) {
- pmu = &type->pmus[j];
- if (pmu->func_id < 0)
- pmu->func_id = j;
-
+ int i, pkg;
+
+ pkg = topology_logical_package_id(cpu);
+ for (; *types; types++) {
+ type = *types;
+ pmu = type->pmus;
+ for (i = 0; i < type->num_boxes; i++, pmu++) {
+ if (pmu->boxes[pkg])
+ continue;
+ /* First cpu of a package allocates the box */
box = uncore_alloc_box(type, cpu_to_node(cpu));
if (!box)
return -ENOMEM;
-
box->pmu = pmu;
- box->phys_id = phys_id;
- *per_cpu_ptr(pmu->box, cpu) = box;
+ box->pkgid = pkg;
+ pmu->boxes[pkg] = box;
}
}
return 0;
}
-static void
-uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
+static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
+ int new_cpu)
{
- struct intel_uncore_type *type;
- struct intel_uncore_pmu *pmu;
+ struct intel_uncore_pmu *pmu = type->pmus;
struct intel_uncore_box *box;
- int i, j;
+ int i, pkg;
- for (i = 0; uncores[i]; i++) {
- type = uncores[i];
- for (j = 0; j < type->num_boxes; j++) {
- pmu = &type->pmus[j];
- if (old_cpu < 0)
- box = uncore_pmu_to_box(pmu, new_cpu);
- else
- box = uncore_pmu_to_box(pmu, old_cpu);
- if (!box)
- continue;
-
- if (old_cpu < 0) {
- WARN_ON_ONCE(box->cpu != -1);
- box->cpu = new_cpu;
- continue;
- }
+ pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
+ for (i = 0; i < type->num_boxes; i++, pmu++) {
+ box = pmu->boxes[pkg];
+ if (!box)
+ continue;
- WARN_ON_ONCE(box->cpu != old_cpu);
- if (new_cpu >= 0) {
- uncore_pmu_cancel_hrtimer(box);
- perf_pmu_migrate_context(&pmu->pmu,
- old_cpu, new_cpu);
- box->cpu = new_cpu;
- } else {
- box->cpu = -1;
- }
+ if (old_cpu < 0) {
+ WARN_ON_ONCE(box->cpu != -1);
+ box->cpu = new_cpu;
+ continue;
}
+
+ WARN_ON_ONCE(box->cpu != old_cpu);
+ box->cpu = -1;
+ if (new_cpu < 0)
+ continue;
+
+ uncore_pmu_cancel_hrtimer(box);
+ perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
+ box->cpu = new_cpu;
}
}
+static void uncore_change_context(struct intel_uncore_type **uncores,
+ int old_cpu, int new_cpu)
+{
+ for (; *uncores; uncores++)
+ uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
+}
+
static void uncore_event_exit_cpu(int cpu)
{
- int i, phys_id, target;
+ int target;
- /* if exiting cpu is used for collecting uncore events */
+ /* Check if exiting cpu is used for collecting uncore events */
if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
return;
- /* find a new cpu to collect uncore events */
- phys_id = topology_physical_package_id(cpu);
- target = -1;
- for_each_online_cpu(i) {
- if (i == cpu)
- continue;
- if (phys_id == topology_physical_package_id(i)) {
- target = i;
- break;
- }
- }
+ /* Find a new cpu to collect uncore events */
+ target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
- /* migrate uncore events to the new cpu */
- if (target >= 0)
+ /* Migrate uncore events to the new target */
+ if (target < nr_cpu_ids)
cpumask_set_cpu(target, &uncore_cpu_mask);
+ else
+ target = -1;
uncore_change_context(uncore_msr_uncores, cpu, target);
uncore_change_context(uncore_pci_uncores, cpu, target);
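
The rewritten exit path leans on cpumask_any_but() returning a value >= nr_cpu_ids when no other cpu remains in the package. A userspace stand-in for that contract, with NBITS playing the role of nr_cpu_ids:

#include <stdio.h>

#define NBITS 8

/* Returns any bit set in mask other than 'but', or NBITS when none is
 * left (the kernel helper returns >= nr_cpu_ids in that case). */
static int mask_any_but(unsigned int mask, int but)
{
	for (int i = 0; i < NBITS; i++)
		if (i != but && (mask & (1u << i)))
			return i;
	return NBITS;
}

int main(void)
{
	printf("%d\n", mask_any_but(0x0c, 2));	/* 3: cpu 3 takes over */
	printf("%d\n", mask_any_but(0x04, 2));	/* 8: no target, caller uses -1 */
	return 0;
}
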
@@ -1201,13 +1206,15 @@ static void uncore_event_exit_cpu(int cpu)
static void uncore_event_init_cpu(int cpu)
{
- int i, phys_id;
+ int target;
- phys_id = topology_physical_package_id(cpu);
- for_each_cpu(i, &uncore_cpu_mask) {
- if (phys_id == topology_physical_package_id(i))
- return;
- }
+ /*
+ * Check if there is an online cpu in the package
+ * which collects uncore events already.
+ */
+ target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
+ if (target < nr_cpu_ids)
+ return;
cpumask_set_cpu(cpu, &uncore_cpu_mask);
@@ -1220,39 +1227,25 @@ static int uncore_cpu_notifier(struct notifier_block *self,
{
unsigned int cpu = (long)hcpu;
- /* allocate/free data structure for uncore box */
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- uncore_cpu_prepare(cpu, -1);
- break;
+ return notifier_from_errno(uncore_cpu_prepare(cpu));
+
case CPU_STARTING:
- uncore_cpu_starting(cpu);
+ uncore_cpu_starting(cpu, false);
+ case CPU_DOWN_FAILED:
+ uncore_event_init_cpu(cpu);
break;
+
case CPU_UP_CANCELED:
case CPU_DYING:
uncore_cpu_dying(cpu);
break;
- case CPU_ONLINE:
- case CPU_DEAD:
- uncore_kfree_boxes();
- break;
- default:
- break;
- }
- /* select the cpu that collects uncore events */
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_FAILED:
- case CPU_STARTING:
- uncore_event_init_cpu(cpu);
- break;
case CPU_DOWN_PREPARE:
uncore_event_exit_cpu(cpu);
break;
- default:
- break;
}
-
return NOTIFY_OK;
}
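
CPU_UP_PREPARE now propagates the -ENOMEM from uncore_cpu_prepare() through notifier_from_errno(), so an allocation failure actually aborts the CPU bring-up. A userspace rendering of that helper; the NOTIFY_* constants follow my reading of include/linux/notifier.h and should be treated as illustrative:

#include <stdio.h>

#define NOTIFY_OK	 0x0001
#define NOTIFY_STOP_MASK 0x8000
#define NOTIFY_BAD	 (NOTIFY_STOP_MASK | 0x0002)

/* err == 0 maps to NOTIFY_OK; a negative errno is folded into a
 * NOTIFY_STOP result so the hotplug core cancels the operation. */
static int notifier_from_errno_model(int err)
{
	return err ? NOTIFY_STOP_MASK | (NOTIFY_BAD - err) : NOTIFY_OK;
}

int main(void)
{
	printf("ok:      %#x\n", notifier_from_errno_model(0));
	printf("-ENOMEM: %#x\n", notifier_from_errno_model(-12));
	return 0;
}
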
@@ -1265,9 +1258,29 @@ static struct notifier_block uncore_cpu_nb = {
.priority = CPU_PRI_PERF + 1,
};
-static void __init uncore_cpu_setup(void *dummy)
+static int __init type_pmu_register(struct intel_uncore_type *type)
{
- uncore_cpu_starting(smp_processor_id());
+ int i, ret;
+
+ for (i = 0; i < type->num_boxes; i++) {
+ ret = uncore_pmu_register(&type->pmus[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int __init uncore_msr_pmus_register(void)
+{
+ struct intel_uncore_type **types = uncore_msr_uncores;
+ int ret;
+
+ for (; *types; types++) {
+ ret = type_pmu_register(*types);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static int __init uncore_cpu_init(void)
@@ -1311,71 +1324,61 @@ static int __init uncore_cpu_init(void)
knl_uncore_cpu_init();
break;
default:
- return 0;
+ return -ENODEV;
}
- ret = uncore_types_init(uncore_msr_uncores);
+ ret = uncore_types_init(uncore_msr_uncores, true);
if (ret)
- return ret;
+ goto err;
+ ret = uncore_msr_pmus_register();
+ if (ret)
+ goto err;
return 0;
+err:
+ uncore_types_exit(uncore_msr_uncores);
+ uncore_msr_uncores = empty_uncore;
+ return ret;
}
-static int __init uncore_pmus_register(void)
+static void __init uncore_cpu_setup(void *dummy)
{
- struct intel_uncore_pmu *pmu;
- struct intel_uncore_type *type;
- int i, j;
-
- for (i = 0; uncore_msr_uncores[i]; i++) {
- type = uncore_msr_uncores[i];
- for (j = 0; j < type->num_boxes; j++) {
- pmu = &type->pmus[j];
- uncore_pmu_register(pmu);
- }
- }
-
- return 0;
+ uncore_cpu_starting(smp_processor_id(), true);
}
-static void __init uncore_cpumask_init(void)
-{
- int cpu;
-
- /*
- * ony invoke once from msr or pci init code
- */
- if (!cpumask_empty(&uncore_cpu_mask))
- return;
+/* Kept static to avoid allocating a few bytes for the normal case */
+static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);
- cpu_notifier_register_begin();
+static int __init uncore_cpumask_init(bool msr)
+{
+ unsigned int cpu;
for_each_online_cpu(cpu) {
- int i, phys_id = topology_physical_package_id(cpu);
+ unsigned int pkg = topology_logical_package_id(cpu);
+ int ret;
- for_each_cpu(i, &uncore_cpu_mask) {
- if (phys_id == topology_physical_package_id(i)) {
- phys_id = -1;
- break;
- }
- }
- if (phys_id < 0)
+ if (test_and_set_bit(pkg, packages))
continue;
-
- uncore_cpu_prepare(cpu, phys_id);
+ /*
+ * The first online cpu of each package allocates and takes
+ * the refcounts for all other online cpus in that package.
+ * If msrs are not enabled no allocation is required.
+ */
+ if (msr) {
+ ret = uncore_cpu_prepare(cpu);
+ if (ret)
+ return ret;
+ }
uncore_event_init_cpu(cpu);
+ smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);
}
- on_each_cpu(uncore_cpu_setup, NULL, 1);
-
__register_cpu_notifier(&uncore_cpu_nb);
-
- cpu_notifier_register_done();
+ return 0;
}
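
uncore_cpumask_init() claims each package exactly once via test_and_set_bit() on the packages bitmap, so only the first online cpu per package does the prepare/setup work. A single-word userspace model; the cpu-to-package table is made up for the demo:

#include <stdio.h>

static unsigned long packages;

/* Returns the previous value of the bit, setting it as a side effect,
 * like the kernel's test_and_set_bit() (minus the atomicity). */
static int test_and_set(int bit)
{
	int old = (packages >> bit) & 1;

	packages |= 1ul << bit;
	return old;
}

int main(void)
{
	int cpu_pkg[] = { 0, 0, 1, 1 };	/* cpu -> package, illustrative */

	for (int cpu = 0; cpu < 4; cpu++)
		if (!test_and_set(cpu_pkg[cpu]))
			printf("cpu %d does package %d setup\n",
			       cpu, cpu_pkg[cpu]);
	return 0;
}
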
-
static int __init intel_uncore_init(void)
{
- int ret;
+ int pret, cret, ret;
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
@@ -1383,19 +1386,27 @@ static int __init intel_uncore_init(void)
if (cpu_has_hypervisor)
return -ENODEV;
- ret = uncore_pci_init();
- if (ret)
- goto fail;
- ret = uncore_cpu_init();
- if (ret) {
- uncore_pci_exit();
- goto fail;
- }
- uncore_cpumask_init();
+ max_packages = topology_max_packages();
+
+ pret = uncore_pci_init();
+ cret = uncore_cpu_init();
- uncore_pmus_register();
+ if (cret && pret)
+ return -ENODEV;
+
+ cpu_notifier_register_begin();
+ ret = uncore_cpumask_init(!cret);
+ if (ret)
+ goto err;
+ cpu_notifier_register_done();
return 0;
-fail:
+
+err:
+ /* Undo box->init_box() */
+ on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
+ uncore_types_exit(uncore_msr_uncores);
+ uncore_pci_exit();
+ cpu_notifier_register_done();
return ret;
}
device_initcall(intel_uncore_init);
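
intel_uncore_init() now treats the PCI and MSR backends as independent: only when both report failure does the whole init bail out with -ENODEV. The decision in isolation, with -19 standing in for -ENODEV:

#include <stdio.h>

static int init_both(int pret, int cret)
{
	if (cret && pret)	/* both backends failed */
		return -19;
	return 0;
}

int main(void)
{
	printf("pci only ok: %d\n", init_both(0, -19));
	printf("msr only ok: %d\n", init_both(-19, 0));
	printf("both failed: %d\n", init_both(-19, -19));
	return 0;
}
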
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/events/intel/uncore.h
index a7086b862156..79766b9a3580 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -1,8 +1,10 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
+#include <asm/apicdef.h>
+
#include <linux/perf_event.h>
-#include "perf_event.h"
+#include "../perf_event.h"
#define UNCORE_PMU_NAME_LEN 32
#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC)
@@ -19,11 +21,12 @@
#define UNCORE_EXTRA_PCI_DEV 0xff
#define UNCORE_EXTRA_PCI_DEV_MAX 3
-/* support up to 8 sockets */
-#define UNCORE_SOCKET_MAX 8
-
#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
+struct pci_extra_dev {
+ struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
+};
+
struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
@@ -61,6 +64,7 @@ struct intel_uncore_type {
struct intel_uncore_ops {
void (*init_box)(struct intel_uncore_box *);
+ void (*exit_box)(struct intel_uncore_box *);
void (*disable_box)(struct intel_uncore_box *);
void (*enable_box)(struct intel_uncore_box *);
void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
@@ -73,13 +77,14 @@ struct intel_uncore_ops {
};
struct intel_uncore_pmu {
- struct pmu pmu;
- char name[UNCORE_PMU_NAME_LEN];
- int pmu_idx;
- int func_id;
- struct intel_uncore_type *type;
- struct intel_uncore_box ** __percpu box;
- struct list_head box_list;
+ struct pmu pmu;
+ char name[UNCORE_PMU_NAME_LEN];
+ int pmu_idx;
+ int func_id;
+ bool registered;
+ atomic_t activeboxes;
+ struct intel_uncore_type *type;
+ struct intel_uncore_box **boxes;
};
struct intel_uncore_extra_reg {
@@ -89,7 +94,8 @@ struct intel_uncore_extra_reg {
};
struct intel_uncore_box {
- int phys_id;
+ int pci_phys_id;
+ int pkgid;
int n_active; /* number of active events */
int n_events;
int cpu; /* cpu to collect events */
@@ -123,7 +129,6 @@ struct pci2phy_map {
int pbus_to_physid[256];
};
-int uncore_pcibus_to_physid(struct pci_bus *bus);
struct pci2phy_map *__find_pci2phy_map(int segment);
ssize_t uncore_event_show(struct kobject *kobj,
@@ -305,14 +310,30 @@ static inline void uncore_box_init(struct intel_uncore_box *box)
}
}
+static inline void uncore_box_exit(struct intel_uncore_box *box)
+{
+ if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+ if (box->pmu->type->ops->exit_box)
+ box->pmu->type->ops->exit_box(box);
+ }
+}
+
static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
- return (box->phys_id < 0);
+ return (box->pkgid < 0);
+}
+
+static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
+{
+ return container_of(event->pmu, struct intel_uncore_pmu, pmu);
+}
+
+static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
+{
+ return event->pmu_private;
}
-struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event);
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
-struct intel_uncore_box *uncore_event_to_box(struct perf_event *event);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
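
The new uncore_event_to_pmu() inline above is plain container_of() arithmetic: recover the enclosing intel_uncore_pmu from the embedded struct pmu. A self-contained userspace rendering of the idiom with toy struct names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pmu { int dummy; };
struct uncore_pmu { int idx; struct pmu pmu; };

int main(void)
{
	struct uncore_pmu u = { .idx = 7 };
	struct pmu *p = &u.pmu;	/* what the core hands back to us */

	printf("idx via container_of: %d\n",
	       container_of(p, struct uncore_pmu, pmu)->idx);
	return 0;
}
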
@@ -328,7 +349,7 @@ extern struct intel_uncore_type **uncore_pci_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
-extern struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
+extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;
/* perf_event_intel_uncore_snb.c */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c
index 2749965afed0..cda569332005 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
+++ b/arch/x86/events/intel/uncore_nhmex.c
@@ -1,5 +1,5 @@
/* Nehalem-EX/Westmere-EX uncore support */
-#include "perf_event_intel_uncore.h"
+#include "uncore.h"
/* NHM-EX event control */
#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff
@@ -201,6 +201,11 @@ static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}
+static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box)
+{
+ wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0);
+}
+
static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
{
unsigned msr = uncore_msr_box_ctl(box);
@@ -250,6 +255,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
#define NHMEX_UNCORE_OPS_COMMON_INIT() \
.init_box = nhmex_uncore_msr_init_box, \
+ .exit_box = nhmex_uncore_msr_exit_box, \
.disable_box = nhmex_uncore_msr_disable_box, \
.enable_box = nhmex_uncore_msr_enable_box, \
.disable_event = nhmex_uncore_msr_disable_event, \
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 2bd030ddd0db..96531d2b843f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -1,5 +1,5 @@
/* Nehalem/SandyBridge/Haswell uncore support */
-#include "perf_event_intel_uncore.h"
+#include "uncore.h"
/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
@@ -95,6 +95,12 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
}
}
+static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->pmu_idx == 0)
+ wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
+}
+
static struct uncore_event_desc snb_uncore_events[] = {
INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
{ /* end: all zeroes */ },
@@ -116,6 +122,7 @@ static struct attribute_group snb_uncore_format_group = {
static struct intel_uncore_ops snb_uncore_msr_ops = {
.init_box = snb_uncore_msr_init_box,
+ .exit_box = snb_uncore_msr_exit_box,
.disable_event = snb_uncore_msr_disable_event,
.enable_event = snb_uncore_msr_enable_event,
.read_counter = uncore_msr_read_counter,
@@ -231,6 +238,11 @@ static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
+static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
+{
+ iounmap(box->io_addr);
+}
+
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}
@@ -301,6 +313,7 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
return -EINVAL;
event->cpu = box->cpu;
+ event->pmu_private = box;
event->hw.idx = -1;
event->hw.last_tag = ~0ULL;
@@ -458,6 +471,7 @@ static struct pmu snb_uncore_imc_pmu = {
static struct intel_uncore_ops snb_uncore_imc_ops = {
.init_box = snb_uncore_imc_init_box,
+ .exit_box = snb_uncore_imc_exit_box,
.enable_box = snb_uncore_imc_enable_box,
.disable_box = snb_uncore_imc_disable_box,
.disable_event = snb_uncore_imc_disable_event,
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 33acb884ccf1..93f6bd9bf761 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1,6 +1,5 @@
/* SandyBridge-EP/IvyTown uncore support */
-#include "perf_event_intel_uncore.h"
-
+#include "uncore.h"
/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
@@ -987,7 +986,9 @@ static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_eve
if (reg1->idx != EXTRA_REG_NONE) {
int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
- struct pci_dev *filter_pdev = uncore_extra_pci_dev[box->phys_id][idx];
+ int pkg = topology_phys_to_logical_pkg(box->pci_phys_id);
+ struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];
+
if (filter_pdev) {
pci_write_config_dword(filter_pdev, reg1->reg,
(u32)reg1->config);
@@ -2521,14 +2522,16 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
void hswep_uncore_cpu_init(void)
{
+ int pkg = topology_phys_to_logical_pkg(0);
+
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
/* Detect 6-8 core systems with only two SBOXes */
- if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
+ if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
u32 capid4;
- pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
+ pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
0x94, &capid4);
if (((capid4 >> 6) & 0x3) == 0)
hswep_uncore_sbox.num_boxes = 2;
@@ -2875,11 +2878,13 @@ static struct intel_uncore_type bdx_uncore_sbox = {
.format_group = &hswep_uncore_sbox_format_group,
};
+#define BDX_MSR_UNCORE_SBOX 3
+
static struct intel_uncore_type *bdx_msr_uncores[] = {
&bdx_uncore_ubox,
&bdx_uncore_cbox,
- &bdx_uncore_sbox,
&hswep_uncore_pcu,
+ &bdx_uncore_sbox,
NULL,
};
@@ -2888,6 +2893,10 @@ void bdx_uncore_cpu_init(void)
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
+
+ /* BDX-DE doesn't have SBOX */
+ if (boot_cpu_data.x86_model == 86)
+ uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
}
static struct intel_uncore_type bdx_uncore_ha = {
diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/events/msr.c
index ec863b9a9f78..ec863b9a9f78 100644
--- a/arch/x86/kernel/cpu/perf_event_msr.c
+++ b/arch/x86/events/msr.c
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/events/perf_event.h
index 7bb61e32fb29..68155cafa8a1 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -586,6 +586,7 @@ struct x86_pmu {
pebs_broken :1,
pebs_prec_dist :1;
int pebs_record_size;
+ int pebs_buffer_size;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
void (*pebs_aliases)(struct perf_event *event);
@@ -860,6 +861,8 @@ extern struct event_constraint intel_ivb_pebs_event_constraints[];
extern struct event_constraint intel_hsw_pebs_event_constraints[];
+extern struct event_constraint intel_bdw_pebs_event_constraints[];
+
extern struct event_constraint intel_skl_pebs_event_constraints[];
struct event_constraint *intel_pebs_constraints(struct perf_event *event);
@@ -904,6 +907,8 @@ void intel_pmu_lbr_init_skl(void);
void intel_pmu_lbr_init_knl(void);
+void intel_pmu_pebs_data_source_nhm(void);
+
int intel_pmu_setup_lbr_filter(struct perf_event *event);
void intel_pt_interrupt(void);
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 1514753fd435..15340e36ddcb 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -256,7 +256,7 @@ extern int force_personality32;
instruction set this CPU supports. This could be done in user space,
but it's not easy, and we've already done it here. */
-#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
+#define ELF_HWCAP (boot_cpu_data.x86_capability[CPUID_1_EDX])
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 7bcb861a04e5..5a2ed3ed2f26 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -165,6 +165,7 @@ struct x86_pmu_capability {
#define GLOBAL_STATUS_ASIF BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58)
+#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
/*
* IBS cpuid feature detection
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 20c11d1aa4cc..813384ef811a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -129,6 +129,8 @@ struct cpuinfo_x86 {
u16 booted_cores;
/* Physical processor id: */
u16 phys_proc_id;
+ /* Logical processor id: */
+ u16 logical_proc_id;
/* Core id: */
u16 cpu_core_id;
/* Compute unit id */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 0fb46482dfde..7f991bd5031b 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -119,12 +119,23 @@ static inline void setup_node_to_cpumask_map(void) { }
extern const struct cpumask *cpu_coregroup_mask(int cpu);
+#define topology_logical_package_id(cpu) (cpu_data(cpu).logical_proc_id)
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
#ifdef ENABLE_TOPO_DEFINES
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
+
+extern unsigned int __max_logical_packages;
+#define topology_max_packages() (__max_logical_packages)
+int topology_update_package_map(unsigned int apicid, unsigned int cpu);
+extern int topology_phys_to_logical_pkg(unsigned int pkg);
+#else
+#define topology_max_packages() (1)
+static inline int
+topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
+static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
#endif
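
The topology additions introduce a dense logical package numbering, so sparse or odd physical ids handed out by the BIOS can still index fixed-size arrays 0..n-1. A toy model of the mapping; the table layout and MAX_PKGS are invented for the demo, not the kernel's actual implementation:

#include <stdio.h>

#define MAX_PKGS 4

static int phys_to_logical[256];	/* stores logical+1, 0 = unset */
static int next_logical;

/* Hand out logical ids densely in first-seen order */
static int update_package_map(int phys)
{
	if (phys_to_logical[phys])
		return phys_to_logical[phys] - 1;
	if (next_logical >= MAX_PKGS)
		return -1;		/* package limit reached */
	phys_to_logical[phys] = ++next_logical;
	return next_logical - 1;
}

int main(void)
{
	printf("%d %d %d\n", update_package_map(3), update_package_map(0),
	       update_package_map(3));	/* 0 1 0 */
	return 0;
}
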
static inline void arch_fix_phys_package_id(int num, u32 slot)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 8a5cddac7d44..531b9611c51d 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2078,6 +2078,20 @@ int generic_processor_info(int apicid, int version)
cpu = cpumask_next_zero(-1, cpu_present_mask);
/*
+ * This can happen on physical hotplug. The sanity check at boot time
+ * is done from native_smp_prepare_cpus() after num_possible_cpus() is
+ * established.
+ */
+ if (topology_update_package_map(apicid, cpu) < 0) {
+ int thiscpu = max + disabled_cpus;
+
+ pr_warning("ACPI: Package limit reached. Processor %d/0x%x ignored.\n",
+ thiscpu, apicid);
+ disabled_cpus++;
+ return -ENOSPC;
+ }
+
+ /*
* Validate version
*/
if (version == 0x0) {
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 58031303e304..7a60424d63fa 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -30,33 +30,11 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
-obj-$(CONFIG_PERF_EVENTS) += perf_event.o
-
-ifdef CONFIG_PERF_EVENTS
-obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o perf_event_amd_uncore.o
-ifdef CONFIG_AMD_IOMMU
-obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
-endif
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o perf_event_intel_cqm.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_pt.o perf_event_intel_bts.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_cstate.o
-
-obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \
- perf_event_intel_uncore_snb.o \
- perf_event_intel_uncore_snbep.o \
- perf_event_intel_uncore_nhmex.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_msr.o
-obj-$(CONFIG_CPU_SUP_AMD) += perf_event_msr.o
-endif
-
-
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_MICROCODE) += microcode/
-obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o
+obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
obj-$(CONFIG_HYPERVISOR_GUEST) += vmware.o hypervisor.o mshyperv.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index a07956a08936..97c59fd60702 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -117,7 +117,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
void (*f_vide)(void);
u64 d, d2;
- printk(KERN_INFO "AMD K6 stepping B detected - ");
+ pr_info("AMD K6 stepping B detected - ");
/*
* It looks like AMD fixed the 2.6.2 bug and improved indirect
@@ -133,10 +133,9 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
d = d2-d;
if (d > 20*K6_BUG_LOOP)
- printk(KERN_CONT
- "system stability may be impaired when more than 32 MB are used.\n");
+ pr_cont("system stability may be impaired when more than 32 MB are used.\n");
else
- printk(KERN_CONT "probably OK (after B9730xxxx).\n");
+ pr_cont("probably OK (after B9730xxxx).\n");
}
/* K6 with old style WHCR */
@@ -154,7 +153,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
wbinvd();
wrmsr(MSR_K6_WHCR, l, h);
local_irq_restore(flags);
- printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
+ pr_info("Enabling old style K6 write allocation for %d Mb\n",
mbytes);
}
return;
@@ -175,7 +174,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
wbinvd();
wrmsr(MSR_K6_WHCR, l, h);
local_irq_restore(flags);
- printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
+ pr_info("Enabling new style K6 write allocation for %d Mb\n",
mbytes);
}
@@ -202,7 +201,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
*/
if (c->x86_model >= 6 && c->x86_model <= 10) {
if (!cpu_has(c, X86_FEATURE_XMM)) {
- printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
+ pr_info("Enabling disabled K7/SSE Support.\n");
msr_clear_bit(MSR_K7_HWCR, 15);
set_cpu_cap(c, X86_FEATURE_XMM);
}
@@ -216,9 +215,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
rdmsr(MSR_K7_CLK_CTL, l, h);
if ((l & 0xfff00000) != 0x20000000) {
- printk(KERN_INFO
- "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
- l, ((l & 0x000fffff)|0x20000000));
+ pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
+ l, ((l & 0x000fffff)|0x20000000));
wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
}
}
@@ -485,7 +483,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
unsigned long pfn = tseg >> PAGE_SHIFT;
- printk(KERN_DEBUG "tseg: %010llx\n", tseg);
+ pr_debug("tseg: %010llx\n", tseg);
if (pfn_range_is_mapped(pfn, pfn + 1))
set_memory_4k((unsigned long)__va(tseg), 1);
}
@@ -500,8 +498,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
rdmsrl(MSR_K7_HWCR, val);
if (!(val & BIT(24)))
- printk(KERN_WARNING FW_BUG "TSC doesn't count "
- "with P0 frequency!\n");
+ pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
}
}
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
index 04f0fe5af83e..a972ac4c7e7d 100644
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ b/arch/x86/kernel/cpu/bugs_64.c
@@ -15,7 +15,7 @@ void __init check_bugs(void)
{
identify_boot_cpu();
#if !defined(CONFIG_SMP)
- printk(KERN_INFO "CPU: ");
+ pr_info("CPU: ");
print_cpu_info(&boot_cpu_data);
#endif
alternative_instructions();
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index ae20be6e483c..ce197bb7c129 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -29,7 +29,7 @@ static void init_c3(struct cpuinfo_x86 *c)
rdmsr(MSR_VIA_FCR, lo, hi);
lo |= ACE_FCR; /* enable ACE unit */
wrmsr(MSR_VIA_FCR, lo, hi);
- printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
+ pr_info("CPU: Enabled ACE h/w crypto\n");
}
/* enable RNG unit, if present and disabled */
@@ -37,7 +37,7 @@ static void init_c3(struct cpuinfo_x86 *c)
rdmsr(MSR_VIA_RNG, lo, hi);
lo |= RNG_ENABLE; /* enable RNG unit */
wrmsr(MSR_VIA_RNG, lo, hi);
- printk(KERN_INFO "CPU: Enabled h/w RNG\n");
+ pr_info("CPU: Enabled h/w RNG\n");
}
/* store Centaur Extended Feature Flags as
@@ -130,7 +130,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
name = "C6";
fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
fcr_clr = DPDC;
- printk(KERN_NOTICE "Disabling bugged TSC.\n");
+ pr_notice("Disabling bugged TSC.\n");
clear_cpu_cap(c, X86_FEATURE_TSC);
break;
case 8:
@@ -163,11 +163,11 @@ static void init_centaur(struct cpuinfo_x86 *c)
newlo = (lo|fcr_set) & (~fcr_clr);
if (newlo != lo) {
- printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n",
+ pr_info("Centaur FCR was 0x%X now 0x%X\n",
lo, newlo);
wrmsr(MSR_IDT_FCR1, newlo, hi);
} else {
- printk(KERN_INFO "Centaur FCR is 0x%X\n", lo);
+ pr_info("Centaur FCR is 0x%X\n", lo);
}
/* Emulate MTRRs using Centaur's MCR. */
set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 37830de8f60a..81cf716f6f97 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -228,7 +228,7 @@ static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
lo |= 0x200000;
wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
- printk(KERN_NOTICE "CPU serial number disabled.\n");
+ pr_notice("CPU serial number disabled.\n");
clear_cpu_cap(c, X86_FEATURE_PN);
/* Disabling the serial number may affect the cpuid level */
@@ -329,9 +329,8 @@ static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
if (!warn)
continue;
- printk(KERN_WARNING
- "CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
- x86_cap_flag(df->feature), df->level);
+ pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
+ x86_cap_flag(df->feature), df->level);
}
}
@@ -510,7 +509,7 @@ void detect_ht(struct cpuinfo_x86 *c)
smp_num_siblings = (ebx & 0xff0000) >> 16;
if (smp_num_siblings == 1) {
- printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
+ pr_info_once("CPU0: Hyper-Threading is disabled\n");
goto out;
}
@@ -531,10 +530,10 @@ void detect_ht(struct cpuinfo_x86 *c)
out:
if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- c->phys_proc_id);
- printk(KERN_INFO "CPU: Processor Core ID: %d\n",
- c->cpu_core_id);
+ pr_info("CPU: Physical Processor ID: %d\n",
+ c->phys_proc_id);
+ pr_info("CPU: Processor Core ID: %d\n",
+ c->cpu_core_id);
printed = 1;
}
#endif
@@ -559,9 +558,8 @@ static void get_cpu_vendor(struct cpuinfo_x86 *c)
}
}
- printk_once(KERN_ERR
- "CPU: vendor_id '%s' unknown, using generic init.\n" \
- "CPU: Your system may be unstable.\n", v);
+ pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
+ "CPU: Your system may be unstable.\n", v);
c->x86_vendor = X86_VENDOR_UNKNOWN;
this_cpu = &default_cpu;
@@ -760,7 +758,7 @@ void __init early_cpu_init(void)
int count = 0;
#ifdef CONFIG_PROCESSOR_SELECT
- printk(KERN_INFO "KERNEL supported cpus:\n");
+ pr_info("KERNEL supported cpus:\n");
#endif
for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
@@ -778,7 +776,7 @@ void __init early_cpu_init(void)
for (j = 0; j < 2; j++) {
if (!cpudev->c_ident[j])
continue;
- printk(KERN_INFO " %s %s\n", cpudev->c_vendor,
+ pr_info(" %s %s\n", cpudev->c_vendor,
cpudev->c_ident[j]);
}
}
@@ -977,6 +975,8 @@ static void identify_cpu(struct cpuinfo_x86 *c)
#ifdef CONFIG_NUMA
numa_add_cpu(smp_processor_id());
#endif
+ /* The boot/hotplug time assignment got cleared, restore it */
+ c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
}
/*
@@ -1061,7 +1061,7 @@ static void __print_cpu_msr(void)
for (index = index_min; index < index_max; index++) {
if (rdmsrl_safe(index, &val))
continue;
- printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+ pr_info(" MSR%08x: %016llx\n", index, val);
}
}
}
@@ -1100,19 +1100,19 @@ void print_cpu_info(struct cpuinfo_x86 *c)
}
if (vendor && !strstr(c->x86_model_id, vendor))
- printk(KERN_CONT "%s ", vendor);
+ pr_cont("%s ", vendor);
if (c->x86_model_id[0])
- printk(KERN_CONT "%s", c->x86_model_id);
+ pr_cont("%s", c->x86_model_id);
else
- printk(KERN_CONT "%d86", c->x86);
+ pr_cont("%d86", c->x86);
- printk(KERN_CONT " (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
+ pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
if (c->x86_mask || c->cpuid_level >= 0)
- printk(KERN_CONT ", stepping: 0x%x)\n", c->x86_mask);
+ pr_cont(", stepping: 0x%x)\n", c->x86_mask);
else
- printk(KERN_CONT ")\n");
+ pr_cont(")\n");
print_cpu_msr(c);
}
@@ -1438,7 +1438,7 @@ void cpu_init(void)
show_ucode_info_early();
- printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+ pr_info("Initializing CPU#%d\n", cpu);
if (cpu_feature_enabled(X86_FEATURE_VME) ||
cpu_has_tsc ||
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index aaf152e79637..187bb583d0df 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -103,7 +103,7 @@ static void check_cx686_slop(struct cpuinfo_x86 *c)
local_irq_restore(flags);
if (ccr5 & 2) { /* possible wrong calibration done */
- printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
+ pr_info("Recalibrating delay loop with SLOP bit reset\n");
calibrate_delay();
c->loops_per_jiffy = loops_per_jiffy;
}
@@ -115,7 +115,7 @@ static void set_cx86_reorder(void)
{
u8 ccr3;
- printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
+ pr_info("Enable Memory access reorder on Cyrix/NSC processor.\n");
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
@@ -128,7 +128,7 @@ static void set_cx86_reorder(void)
static void set_cx86_memwb(void)
{
- printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
+ pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
/* CCR2 bit 2: unlock NW bit */
setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
@@ -268,7 +268,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
* VSA1 we work around however.
*/
- printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
+ pr_info("Working around Cyrix MediaGX virtual DMA bugs.\n");
isa_dma_bridge_buggy = 2;
/* We do this before the PCI layer is running. However we
@@ -426,7 +426,7 @@ static void cyrix_identify(struct cpuinfo_x86 *c)
if (dir0 == 5 || dir0 == 3) {
unsigned char ccr3;
unsigned long flags;
- printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
+ pr_info("Enabling CPUID on Cyrix processor.\n");
local_irq_save(flags);
ccr3 = getCx86(CX86_CCR3);
/* enable MAPEN */
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index d820d8eae96b..73d391ae452f 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -56,7 +56,7 @@ detect_hypervisor_vendor(void)
}
if (max_pri)
- printk(KERN_INFO "Hypervisor detected: %s\n", x86_hyper->name);
+ pr_info("Hypervisor detected: %s\n", x86_hyper->name);
}
void init_hypervisor(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 565648bc1a0a..38766c2b5b00 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -61,7 +61,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
*/
if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
c->microcode < 0x20e) {
- printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
+ pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
clear_cpu_cap(c, X86_FEATURE_PSE);
}
@@ -140,7 +140,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
- printk(KERN_INFO "Disabled fast string operations\n");
+ pr_info("Disabled fast string operations\n");
setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
setup_clear_cpu_cap(X86_FEATURE_ERMS);
}
@@ -160,6 +160,19 @@ static void early_init_intel(struct cpuinfo_x86 *c)
pr_info("Disabling PGE capability bit\n");
setup_clear_cpu_cap(X86_FEATURE_PGE);
}
+
+ if (c->cpuid_level >= 0x00000001) {
+ u32 eax, ebx, ecx, edx;
+
+ cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
+ /*
+ * If HTT (EDX[28]) is set EBX[16:23] contain the number of
+ * apicids which are reserved per package. Store the resulting
+ * shift value for the package management code.
+ */
+ if (edx & (1U << 28))
+ c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
+ }
}
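
The shift stored in x86_coreid_bits above is get_count_order() of the reserved apicid count, i.e. the smallest s with (1 << s) >= n. A tiny model of that rounding-up log2:

#include <stdio.h>

/* Smallest shift s such that (1 << s) covers n apicids */
static int count_order(unsigned int n)
{
	int s = 0;

	while ((1u << s) < n)
		s++;
	return s;
}

int main(void)
{
	printf("%d %d %d\n", count_order(16), count_order(12),
	       count_order(1));	/* 4 4 0 */
	return 0;
}
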
#ifdef CONFIG_X86_32
@@ -176,7 +189,7 @@ int ppro_with_ram_bug(void)
boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model == 1 &&
boot_cpu_data.x86_mask < 8) {
- printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
+ pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
return 1;
}
return 0;
@@ -225,7 +238,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_F00F);
if (!f00f_workaround_enabled) {
- printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
+ pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
f00f_workaround_enabled = 1;
}
}
@@ -244,7 +257,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
* Forcefully enable PAE if kernel parameter "forcepae" is present.
*/
if (forcepae) {
- printk(KERN_WARNING "PAE forced!\n");
+ pr_warn("PAE forced!\n");
set_cpu_cap(c, X86_FEATURE_PAE);
add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 0b6c52388cf4..6ed779efff26 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -444,7 +444,7 @@ static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
err = amd_set_l3_disable_slot(nb, cpu, slot, val);
if (err) {
if (err == -EEXIST)
- pr_warning("L3 slot %d in use/index already disabled!\n",
+ pr_warn("L3 slot %d in use/index already disabled!\n",
slot);
return err;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 4cfba4371a71..517619ea6498 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -115,7 +115,7 @@ static int raise_local(void)
int cpu = m->extcpu;
if (m->inject_flags & MCJ_EXCEPTION) {
- printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu);
+ pr_info("Triggering MCE exception on CPU %d\n", cpu);
switch (context) {
case MCJ_CTX_IRQ:
/*
@@ -128,15 +128,15 @@ static int raise_local(void)
raise_exception(m, NULL);
break;
default:
- printk(KERN_INFO "Invalid MCE context\n");
+ pr_info("Invalid MCE context\n");
ret = -EINVAL;
}
- printk(KERN_INFO "MCE exception done on CPU %d\n", cpu);
+ pr_info("MCE exception done on CPU %d\n", cpu);
} else if (m->status) {
- printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu);
+ pr_info("Starting machine check poll CPU %d\n", cpu);
raise_poll(m);
mce_notify_irq();
- printk(KERN_INFO "Machine check poll done on CPU %d\n", cpu);
+ pr_info("Machine check poll done on CPU %d\n", cpu);
} else
m->finished = 0;
@@ -183,8 +183,7 @@ static void raise_mce(struct mce *m)
start = jiffies;
while (!cpumask_empty(mce_inject_cpumask)) {
if (!time_before(jiffies, start + 2*HZ)) {
- printk(KERN_ERR
- "Timeout waiting for mce inject %lx\n",
+ pr_err("Timeout waiting for mce inject %lx\n",
*cpumask_bits(mce_inject_cpumask));
break;
}
@@ -241,7 +240,7 @@ static int inject_init(void)
{
if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
return -ENOMEM;
- printk(KERN_INFO "Machine check injector initialized\n");
+ pr_info("Machine check injector initialized\n");
register_mce_write_callback(mce_write);
register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
"mce_notify");
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index 12402e10aeff..2a0717bf8033 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -26,14 +26,12 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code)
rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
- printk(KERN_EMERG
- "CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n",
- smp_processor_id(), loaddr, lotype);
+ pr_emerg("CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n",
+ smp_processor_id(), loaddr, lotype);
if (lotype & (1<<5)) {
- printk(KERN_EMERG
- "CPU#%d: Possible thermal failure (CPU on fire ?).\n",
- smp_processor_id());
+ pr_emerg("CPU#%d: Possible thermal failure (CPU on fire ?).\n",
+ smp_processor_id());
}
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
@@ -61,12 +59,10 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
/* Read registers before enabling: */
rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
- printk(KERN_INFO
- "Intel old style machine check architecture supported.\n");
+ pr_info("Intel old style machine check architecture supported.\n");
/* Enable MCE: */
cr4_set_bits(X86_CR4_MCE);
- printk(KERN_INFO
- "Intel old style machine check reporting enabled on CPU#%d.\n",
- smp_processor_id());
+ pr_info("Intel old style machine check reporting enabled on CPU#%d.\n",
+ smp_processor_id());
}
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 2c5aaf8c2e2f..0b445c2ff735 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -190,7 +190,7 @@ static int therm_throt_process(bool new_event, int event, int level)
/* if we just entered the thermal event */
if (new_event) {
if (event == THERMAL_THROTTLING_EVENT)
- printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
+ pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
this_cpu,
level == CORE_LEVEL ? "Core" : "Package",
state->count);
@@ -198,8 +198,7 @@ static int therm_throt_process(bool new_event, int event, int level)
}
if (old_event) {
if (event == THERMAL_THROTTLING_EVENT)
- printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
- this_cpu,
+ pr_info("CPU%d: %s temperature/speed normal\n", this_cpu,
level == CORE_LEVEL ? "Core" : "Package");
return 1;
}
@@ -417,8 +416,8 @@ static void intel_thermal_interrupt(void)
static void unexpected_thermal_interrupt(void)
{
- printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
- smp_processor_id());
+ pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
+ smp_processor_id());
}
static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
@@ -499,7 +498,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
if (system_state == SYSTEM_BOOTING)
- printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", cpu);
+ pr_debug("CPU%d: Thermal monitoring handled by SMI\n", cpu);
return;
}
@@ -557,8 +556,8 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
l = apic_read(APIC_LVTTHMR);
apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
- printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
- tm2 ? "TM2" : "TM1");
+ pr_info_once("CPU0: Thermal monitoring enabled (%s)\n",
+ tm2 ? "TM2" : "TM1");
/* enable thermal throttle processing */
atomic_set(&therm_throt_en, 1);
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index 7245980186ee..fcf9ae9384f4 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -12,8 +12,8 @@
static void default_threshold_interrupt(void)
{
- printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n",
- THRESHOLD_APIC_VECTOR);
+ pr_err("Unexpected threshold interrupt at vector %x\n",
+ THRESHOLD_APIC_VECTOR);
}
void (*mce_threshold_vector)(void) = default_threshold_interrupt;
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index 01dd8702880b..c6a722e1d011 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -17,7 +17,7 @@ static void winchip_machine_check(struct pt_regs *regs, long error_code)
{
ist_enter(regs);
- printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
+ pr_emerg("CPU0: Machine Check Exception.\n");
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
ist_exit(regs);
@@ -39,6 +39,5 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
cr4_set_bits(X86_CR4_MCE);
- printk(KERN_INFO
- "Winchip machine check reporting enabled on CPU#0.\n");
+ pr_info("Winchip machine check reporting enabled on CPU#0.\n");
}
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 2233f8a76615..75d3aab5f7b2 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -953,7 +953,7 @@ struct microcode_ops * __init init_amd_microcode(void)
struct cpuinfo_x86 *c = &boot_cpu_data;
if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
- pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
+ pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
return NULL;
}
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 20e242ea1bc4..4e7c6933691c 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -161,8 +161,8 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
- printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
- ms_hyperv.features, ms_hyperv.hints);
+ pr_info("HyperV: features 0x%x, hints 0x%x\n",
+ ms_hyperv.features, ms_hyperv.hints);
#ifdef CONFIG_X86_LOCAL_APIC
if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
@@ -174,8 +174,8 @@ static void __init ms_hyperv_init_platform(void)
rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
lapic_timer_frequency = hv_lapic_frequency;
- printk(KERN_INFO "HyperV: LAPIC Timer Frequency: %#x\n",
- lapic_timer_frequency);
+ pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
+ lapic_timer_frequency);
}
#endif
diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
index 316fe3e60a97..3d689937fc1b 100644
--- a/arch/x86/kernel/cpu/mtrr/centaur.c
+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
@@ -103,7 +103,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
*/
if (type != MTRR_TYPE_WRCOMB &&
(centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
- pr_warning("mtrr: only write-combining%s supported\n",
+ pr_warn("mtrr: only write-combining%s supported\n",
centaur_mcr_type ? " and uncacheable are" : " is");
return -EINVAL;
}
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 0d98503c2245..31e951ce6dff 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -57,9 +57,9 @@ static int __initdata nr_range;
static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
static int __initdata debug_print;
-#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0)
+#define Dprintk(x...) do { if (debug_print) pr_debug(x); } while (0)
-#define BIOS_BUG_MSG KERN_WARNING \
+#define BIOS_BUG_MSG \
"WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
static int __init
@@ -81,9 +81,9 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
base, base + size);
}
if (debug_print) {
- printk(KERN_DEBUG "After WB checking\n");
+ pr_debug("After WB checking\n");
for (i = 0; i < nr_range; i++)
- printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+ pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
range[i].start, range[i].end);
}
@@ -101,7 +101,7 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
(mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
/* Var MTRR contains UC entry below 1M? Skip it: */
- printk(BIOS_BUG_MSG, i);
+ pr_warn(BIOS_BUG_MSG, i);
if (base + size <= (1<<(20-PAGE_SHIFT)))
continue;
size -= (1<<(20-PAGE_SHIFT)) - base;
@@ -114,11 +114,11 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
extra_remove_base + extra_remove_size);
if (debug_print) {
- printk(KERN_DEBUG "After UC checking\n");
+ pr_debug("After UC checking\n");
for (i = 0; i < RANGE_NUM; i++) {
if (!range[i].end)
continue;
- printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+ pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
range[i].start, range[i].end);
}
}
@@ -126,9 +126,9 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
/* sort the ranges */
nr_range = clean_sort_range(range, RANGE_NUM);
if (debug_print) {
- printk(KERN_DEBUG "After sorting\n");
+ pr_debug("After sorting\n");
for (i = 0; i < nr_range; i++)
- printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+ pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
range[i].start, range[i].end);
}
@@ -544,7 +544,7 @@ static void __init print_out_mtrr_range_state(void)
start_base = to_size_factor(start_base, &start_factor),
type = range_state[i].type;
- printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
+ pr_debug("reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
i, start_base, start_factor,
size_base, size_factor,
(type == MTRR_TYPE_UNCACHABLE) ? "UC" :
@@ -713,7 +713,7 @@ int __init mtrr_cleanup(unsigned address_bits)
return 0;
/* Print original var MTRRs at first, for debugging: */
- printk(KERN_DEBUG "original variable MTRRs\n");
+ pr_debug("original variable MTRRs\n");
print_out_mtrr_range_state();
memset(range, 0, sizeof(range));
@@ -733,7 +733,7 @@ int __init mtrr_cleanup(unsigned address_bits)
x_remove_base, x_remove_size);
range_sums = sum_ranges(range, nr_range);
- printk(KERN_INFO "total RAM covered: %ldM\n",
+ pr_info("total RAM covered: %ldM\n",
range_sums >> (20 - PAGE_SHIFT));
if (mtrr_chunk_size && mtrr_gran_size) {
@@ -745,12 +745,11 @@ int __init mtrr_cleanup(unsigned address_bits)
if (!result[i].bad) {
set_var_mtrr_all(address_bits);
- printk(KERN_DEBUG "New variable MTRRs\n");
+ pr_debug("New variable MTRRs\n");
print_out_mtrr_range_state();
return 1;
}
- printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
- "will find optimal one\n");
+ pr_info("invalid mtrr_gran_size or mtrr_chunk_size, will find optimal one\n");
}
i = 0;
@@ -768,7 +767,7 @@ int __init mtrr_cleanup(unsigned address_bits)
x_remove_base, x_remove_size, i);
if (debug_print) {
mtrr_print_out_one_result(i);
- printk(KERN_INFO "\n");
+ pr_info("\n");
}
i++;
@@ -779,7 +778,7 @@ int __init mtrr_cleanup(unsigned address_bits)
index_good = mtrr_search_optimal_index();
if (index_good != -1) {
- printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
+ pr_info("Found optimal setting for mtrr clean up\n");
i = index_good;
mtrr_print_out_one_result(i);
@@ -790,7 +789,7 @@ int __init mtrr_cleanup(unsigned address_bits)
gran_size <<= 10;
x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
set_var_mtrr_all(address_bits);
- printk(KERN_DEBUG "New variable MTRRs\n");
+ pr_debug("New variable MTRRs\n");
print_out_mtrr_range_state();
return 1;
} else {
@@ -799,8 +798,8 @@ int __init mtrr_cleanup(unsigned address_bits)
mtrr_print_out_one_result(i);
}
- printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n");
- printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n");
+ pr_info("mtrr_cleanup: can not find optimal value\n");
+ pr_info("please specify mtrr_gran_size/mtrr_chunk_size\n");
return 0;
}
@@ -918,7 +917,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
/* kvm/qemu doesn't have mtrr set right, don't trim them all: */
if (!highest_pfn) {
- printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
+ pr_info("CPU MTRRs all blank - virtualized system.\n");
return 0;
}
@@ -973,7 +972,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
end_pfn);
if (total_trim_size) {
- pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20);
+ pr_warn("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n",
+ total_trim_size >> 20);
if (!changed_by_mtrr_cleanup)
WARN_ON(1);
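For reference, the pr_*() helpers these conversions target are thin wrappers around printk(); roughly, per include/linux/printk.h (eliding the CONFIG_DYNAMIC_DEBUG variant), they look like the sketch below. One semantic note: unlike a bare printk(KERN_DEBUG ...), pr_debug() compiles away unless DEBUG or dynamic debug is enabled.

	#define pr_warn(fmt, ...)  printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
	#define pr_info(fmt, ...)  printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
	#ifdef DEBUG
	#define pr_debug(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#else	/* compiled out, arguments still type-checked */
	#define pr_debug(fmt, ...) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#endif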
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index c870af161008..fcbcb2f678ca 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -55,7 +55,7 @@ static inline void k8_check_syscfg_dram_mod_en(void)
rdmsr(MSR_K8_SYSCFG, lo, hi);
if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
- printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
+ pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
" not cleared by BIOS, clearing this bit\n",
smp_processor_id());
lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
@@ -501,14 +501,14 @@ void __init mtrr_state_warn(void)
if (!mask)
return;
if (mask & MTRR_CHANGE_MASK_FIXED)
- pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
+ pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_VARIABLE)
- pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
+ pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_DEFTYPE)
- pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
+ pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
- printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
- printk(KERN_INFO "mtrr: corrected configuration.\n");
+ pr_info("mtrr: probably your BIOS does not setup all CPUs.\n");
+ pr_info("mtrr: corrected configuration.\n");
}
/*
@@ -519,8 +519,7 @@ void __init mtrr_state_warn(void)
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
if (wrmsr_safe(msr, a, b) < 0) {
- printk(KERN_ERR
- "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
+ pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
smp_processor_id(), msr, a, b);
}
}
@@ -607,7 +606,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
tmp |= ~((1ULL<<(hi - 1)) - 1);
if (tmp != mask) {
- printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
+ pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
mask = tmp;
}
@@ -858,13 +857,13 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
boot_cpu_data.x86_model == 1 &&
boot_cpu_data.x86_mask <= 7) {
if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
- pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
+ pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
return -EINVAL;
}
if (!(base + size < 0x70000 || base > 0x7003F) &&
(type == MTRR_TYPE_WRCOMB
|| type == MTRR_TYPE_WRBACK)) {
- pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
+ pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
return -EINVAL;
}
}
@@ -878,7 +877,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
lbase = lbase >> 1, last = last >> 1)
;
if (lbase != last) {
- pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
+ pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
return -EINVAL;
}
return 0;
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 5c3d149ee91c..ba80d68f683e 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -300,24 +300,24 @@ int mtrr_add_page(unsigned long base, unsigned long size,
return error;
if (type >= MTRR_NUM_TYPES) {
- pr_warning("mtrr: type: %u invalid\n", type);
+ pr_warn("mtrr: type: %u invalid\n", type);
return -EINVAL;
}
/* If the type is WC, check that this processor supports it */
if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
- pr_warning("mtrr: your processor doesn't support write-combining\n");
+ pr_warn("mtrr: your processor doesn't support write-combining\n");
return -ENOSYS;
}
if (!size) {
- pr_warning("mtrr: zero sized request\n");
+ pr_warn("mtrr: zero sized request\n");
return -EINVAL;
}
if ((base | (base + size - 1)) >>
(boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
- pr_warning("mtrr: base or size exceeds the MTRR width\n");
+ pr_warn("mtrr: base or size exceeds the MTRR width\n");
return -EINVAL;
}
@@ -348,7 +348,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
} else if (types_compatible(type, ltype))
continue;
}
- pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
+ pr_warn("mtrr: 0x%lx000,0x%lx000 overlaps existing"
" 0x%lx000,0x%lx000\n", base, size, lbase,
lsize);
goto out;
@@ -357,7 +357,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
if (ltype != type) {
if (types_compatible(type, ltype))
continue;
- pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
+ pr_warn("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
base, size, mtrr_attrib_to_str(ltype),
mtrr_attrib_to_str(type));
goto out;
@@ -395,7 +395,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
static int mtrr_check(unsigned long base, unsigned long size)
{
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
- pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
+ pr_warn("mtrr: size and base must be multiples of 4 kiB\n");
pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
dump_stack();
return -1;
@@ -493,16 +493,16 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
}
}
if (reg >= max) {
- pr_warning("mtrr: register: %d too big\n", reg);
+ pr_warn("mtrr: register: %d too big\n", reg);
goto out;
}
mtrr_if->get(reg, &lbase, &lsize, &ltype);
if (lsize < 1) {
- pr_warning("mtrr: MTRR %d not used\n", reg);
+ pr_warn("mtrr: MTRR %d not used\n", reg);
goto out;
}
if (mtrr_usage_table[reg] < 1) {
- pr_warning("mtrr: reg: %d has count=0\n", reg);
+ pr_warn("mtrr: reg: %d has count=0\n", reg);
goto out;
}
if (--mtrr_usage_table[reg] < 1)
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index 819d94982e07..f6f50c4ceaec 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -51,7 +51,7 @@ void x86_init_rdrand(struct cpuinfo_x86 *c)
for (i = 0; i < SANITY_CHECK_LOOPS; i++) {
if (!rdrand_long(&tmp)) {
clear_cpu_cap(c, X86_FEATURE_RDRAND);
- printk_once(KERN_WARNING "rdrand: disabled\n");
+ pr_warn_once("rdrand: disabled\n");
return;
}
}
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 4c60eaf0571c..cd531355e838 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -87,10 +87,10 @@ void detect_extended_topology(struct cpuinfo_x86 *c)
c->x86_max_cores = (core_level_siblings / smp_num_siblings);
if (!printed) {
- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+ pr_info("CPU: Physical Processor ID: %d\n",
c->phys_proc_id);
if (c->x86_max_cores > 1)
- printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+ pr_info("CPU: Processor Core ID: %d\n",
c->cpu_core_id);
printed = 1;
}
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 252da7aceca6..e3b4d1841175 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -33,7 +33,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
if (max >= 0x80860001) {
cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
if (cpu_rev != 0x02000000) {
- printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
+ pr_info("CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
(cpu_rev >> 24) & 0xff,
(cpu_rev >> 16) & 0xff,
(cpu_rev >> 8) & 0xff,
@@ -44,10 +44,10 @@ static void init_transmeta(struct cpuinfo_x86 *c)
if (max >= 0x80860002) {
cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
if (cpu_rev == 0x02000000) {
- printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
+ pr_info("CPU: Processor revision %08X, %u MHz\n",
new_cpu_rev, cpu_freq);
}
- printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
+ pr_info("CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
(cms_rev1 >> 24) & 0xff,
(cms_rev1 >> 16) & 0xff,
(cms_rev1 >> 8) & 0xff,
@@ -76,7 +76,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
(void *)&cpu_info[56],
(void *)&cpu_info[60]);
cpu_info[64] = '\0';
- printk(KERN_INFO "CPU: %s\n", cpu_info);
+ pr_info("CPU: %s\n", cpu_info);
}
/* Unhide possibly hidden capability flags */
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 628a059a9a06..364e58346897 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -62,7 +62,7 @@ static unsigned long vmware_get_tsc_khz(void)
tsc_hz = eax | (((uint64_t)ebx) << 32);
do_div(tsc_hz, 1000);
BUG_ON(tsc_hz >> 32);
- printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
+ pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
(unsigned long) tsc_hz / 1000,
(unsigned long) tsc_hz % 1000);
@@ -84,8 +84,7 @@ static void __init vmware_platform_setup(void)
if (ebx != UINT_MAX)
x86_platform.calibrate_tsc = vmware_get_tsc_khz;
else
- printk(KERN_WARNING
- "Failed to get TSC freq from the hypervisor\n");
+ pr_warn("Failed to get TSC freq from the hypervisor\n");
}
/*
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 30ca7607cbbb..97340f2c437c 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -408,7 +408,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
processor.cpuflag = CPU_ENABLED;
processor.cpufeature = (boot_cpu_data.x86 << 8) |
(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
- processor.featureflag = boot_cpu_data.x86_capability[0];
+ processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
processor.reserved[0] = 0;
processor.reserved[1] = 0;
for (i = 0; i < 2; i++) {
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 8a2cdd736fa4..04b132a767f1 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -30,6 +30,7 @@
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
+#include <asm/cache.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
@@ -69,7 +70,7 @@ struct nmi_stats {
static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
-static int ignore_nmis;
+static int ignore_nmis __read_mostly;
int unknown_nmi_panic;
/*
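A note on the new annotation: __read_mostly moves a variable into a dedicated section so that rarely-written data does not share cache lines with write-hot data. On x86 it comes from <asm/cache.h> (hence the added include) and is defined roughly as:

	#define __read_mostly __attribute__((__section__(".data..read_mostly")))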
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 24d57f77b3c1..3bf1e0b5f827 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -97,6 +97,14 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
+/* Logical package management. We might want to allocate that dynamically */
+static int *physical_to_logical_pkg __read_mostly;
+static unsigned long *physical_package_map __read_mostly;
+static unsigned long *logical_package_map __read_mostly;
+static unsigned int max_physical_pkg_id __read_mostly;
+unsigned int __max_logical_packages __read_mostly;
+EXPORT_SYMBOL(__max_logical_packages);
+
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
unsigned long flags;
@@ -251,6 +259,97 @@ static void notrace start_secondary(void *unused)
cpu_startup_entry(CPUHP_ONLINE);
}
+int topology_update_package_map(unsigned int apicid, unsigned int cpu)
+{
+ unsigned int new, pkg = apicid >> boot_cpu_data.x86_coreid_bits;
+
+ /* Called from early boot ? */
+ if (!physical_package_map)
+ return 0;
+
+ if (pkg >= max_physical_pkg_id)
+ return -EINVAL;
+
+ /* Set the logical package id */
+ if (test_and_set_bit(pkg, physical_package_map))
+ goto found;
+
+ if (pkg < __max_logical_packages) {
+ set_bit(pkg, logical_package_map);
+ physical_to_logical_pkg[pkg] = pkg;
+ goto found;
+ }
+ new = find_first_zero_bit(logical_package_map, __max_logical_packages);
+ if (new >= __max_logical_packages) {
+ physical_to_logical_pkg[pkg] = -1;
+ pr_warn("APIC(%x) Package %u exceeds logical package map\n",
+ apicid, pkg);
+ return -ENOSPC;
+ }
+ set_bit(new, logical_package_map);
+ pr_info("APIC(%x) Converting physical %u to logical package %u\n",
+ apicid, pkg, new);
+ physical_to_logical_pkg[pkg] = new;
+
+found:
+ cpu_data(cpu).logical_proc_id = physical_to_logical_pkg[pkg];
+ return 0;
+}
+
+/**
+ * topology_phys_to_logical_pkg - Map a physical package id to a logical package id
+ *
+ * Returns logical package id or -1 if not found
+ */
+int topology_phys_to_logical_pkg(unsigned int phys_pkg)
+{
+ if (phys_pkg >= max_physical_pkg_id)
+ return -1;
+ return physical_to_logical_pkg[phys_pkg];
+}
+EXPORT_SYMBOL(topology_phys_to_logical_pkg);
+
+static void __init smp_init_package_map(void)
+{
+ unsigned int ncpus, cpu;
+ size_t size;
+
+ /*
+	 * Today neither Intel nor AMD support heterogeneous systems. That
+ * might change in the future....
+ */
+ ncpus = boot_cpu_data.x86_max_cores * smp_num_siblings;
+ __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
+
+ /*
+ * Possibly larger than what we need as the number of apic ids per
+	 * package can be smaller than the apic ids actually used.
+ */
+ max_physical_pkg_id = DIV_ROUND_UP(MAX_LOCAL_APIC, ncpus);
+ size = max_physical_pkg_id * sizeof(unsigned int);
+ physical_to_logical_pkg = kmalloc(size, GFP_KERNEL);
+ memset(physical_to_logical_pkg, 0xff, size);
+ size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
+ physical_package_map = kzalloc(size, GFP_KERNEL);
+ size = BITS_TO_LONGS(__max_logical_packages) * sizeof(unsigned long);
+ logical_package_map = kzalloc(size, GFP_KERNEL);
+
+ pr_info("Max logical packages: %u\n", __max_logical_packages);
+
+ for_each_present_cpu(cpu) {
+ unsigned int apicid = apic->cpu_present_to_apicid(cpu);
+
+ if (apicid == BAD_APICID || !apic->apic_id_valid(apicid))
+ continue;
+ if (!topology_update_package_map(apicid, cpu))
+ continue;
+ pr_warn("CPU %u APICId %x disabled\n", cpu, apicid);
+ per_cpu(x86_bios_cpu_apicid, cpu) = BAD_APICID;
+ set_cpu_possible(cpu, false);
+ set_cpu_present(cpu, false);
+ }
+}
+
void __init smp_store_boot_cpu_info(void)
{
int id = 0; /* CPU 0 */
@@ -258,6 +357,7 @@ void __init smp_store_boot_cpu_info(void)
*c = boot_cpu_data;
c->cpu_index = id;
+ smp_init_package_map();
}
/*
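A hedged sketch of how a consumer (say, an uncore-style driver) could use the new dense mapping; 'pkg_priv' and 'get_pkg_priv' are hypothetical names, not part of the patch:

	static void **pkg_priv;	/* hypothetical array sized by __max_logical_packages */

	static void *get_pkg_priv(unsigned int cpu)
	{
		int pkg = topology_phys_to_logical_pkg(cpu_data(cpu).phys_proc_id);

		if (pkg < 0)		/* package was never enumerated */
			return NULL;
		return pkg_priv[pkg];	/* logical ids are dense, so this indexes gap-free */
	}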
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index f56cc418c87d..fd57d3ae7e16 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1529,7 +1529,7 @@ __init void lguest_init(void)
*/
cpu_detect(&new_cpu_data);
/* head.S usually sets up the first capability word, so do it here. */
- new_cpu_data.x86_capability[0] = cpuid_edx(1);
+ new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
/* Math is always hard! */
set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index d09e4c9d7cc5..2c261082eadf 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1654,7 +1654,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
cpu_detect(&new_cpu_data);
set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
new_cpu_data.wp_works_ok = 1;
- new_cpu_data.x86_capability[0] = cpuid_edx(1);
+ new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
#endif
if (xen_start_info->mod_start) {
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 724a08740a04..9466354d3e49 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -11,7 +11,7 @@
#include "pmu.h"
/* x86_pmu.handle_irq definition */
-#include "../kernel/cpu/perf_event.h"
+#include "../events/perf_event.h"
#define XENPMU_IRQ_PROCESSING 1
struct xenpmu {
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f5c5a3fa2c81..a9d8cab18b00 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -468,6 +468,7 @@ struct perf_event {
int group_flags;
struct perf_event *group_leader;
struct pmu *pmu;
+ void *pmu_private;
enum perf_event_active_state state;
unsigned int attach_state;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 614614821f00..b7231498de47 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6785,7 +6785,7 @@ static void swevent_hlist_release(struct swevent_htable *swhash)
kfree_rcu(hlist, rcu_head);
}
-static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
+static void swevent_hlist_put_cpu(int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
@@ -6797,15 +6797,15 @@ static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
mutex_unlock(&swhash->hlist_mutex);
}
-static void swevent_hlist_put(struct perf_event *event)
+static void swevent_hlist_put(void)
{
int cpu;
for_each_possible_cpu(cpu)
- swevent_hlist_put_cpu(event, cpu);
+ swevent_hlist_put_cpu(cpu);
}
-static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
+static int swevent_hlist_get_cpu(int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
int err = 0;
@@ -6828,14 +6828,13 @@ exit:
return err;
}
-static int swevent_hlist_get(struct perf_event *event)
+static int swevent_hlist_get(void)
{
- int err;
- int cpu, failed_cpu;
+ int err, cpu, failed_cpu;
get_online_cpus();
for_each_possible_cpu(cpu) {
- err = swevent_hlist_get_cpu(event, cpu);
+ err = swevent_hlist_get_cpu(cpu);
if (err) {
failed_cpu = cpu;
goto fail;
@@ -6848,7 +6847,7 @@ fail:
for_each_possible_cpu(cpu) {
if (cpu == failed_cpu)
break;
- swevent_hlist_put_cpu(event, cpu);
+ swevent_hlist_put_cpu(cpu);
}
put_online_cpus();
@@ -6864,7 +6863,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
WARN_ON(event->parent);
static_key_slow_dec(&perf_swevent_enabled[event_id]);
- swevent_hlist_put(event);
+ swevent_hlist_put();
}
static int perf_swevent_init(struct perf_event *event)
@@ -6895,7 +6894,7 @@ static int perf_swevent_init(struct perf_event *event)
if (!event->parent) {
int err;
- err = swevent_hlist_get(event);
+ err = swevent_hlist_get();
if (err)
return err;
@@ -8001,6 +8000,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
}
}
+ /* symmetric to unaccount_event() in _free_event() */
+ account_event(event);
+
return event;
err_per_task:
@@ -8364,8 +8366,6 @@ SYSCALL_DEFINE5(perf_event_open,
}
}
- account_event(event);
-
/*
* Special case software events and allow them to be part of
* any hardware group.
@@ -8662,8 +8662,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
/* Mark owner so we could distinguish it from user events. */
event->owner = TASK_TOMBSTONE;
- account_event(event);
-
ctx = find_get_context(event->pmu, task, event);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
@@ -9447,6 +9445,7 @@ ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
return 0;
}
+EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
static int __init perf_event_sysfs_init(void)
{
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index c9956440d0e6..21b81a41dae5 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -30,7 +30,7 @@
struct trace_kprobe {
struct list_head list;
struct kretprobe rp; /* Use rp.kp for kprobe use */
- unsigned long nhit;
+ unsigned long __percpu *nhit;
const char *symbol; /* symbol name */
struct trace_probe tp;
};
@@ -274,6 +274,10 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
if (!tk)
return ERR_PTR(ret);
+ tk->nhit = alloc_percpu(unsigned long);
+ if (!tk->nhit)
+ goto error;
+
if (symbol) {
tk->symbol = kstrdup(symbol, GFP_KERNEL);
if (!tk->symbol)
@@ -313,6 +317,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
error:
kfree(tk->tp.call.name);
kfree(tk->symbol);
+ free_percpu(tk->nhit);
kfree(tk);
return ERR_PTR(ret);
}
@@ -327,6 +332,7 @@ static void free_trace_kprobe(struct trace_kprobe *tk)
kfree(tk->tp.call.class->system);
kfree(tk->tp.call.name);
kfree(tk->symbol);
+ free_percpu(tk->nhit);
kfree(tk);
}
@@ -874,9 +880,14 @@ static const struct file_operations kprobe_events_ops = {
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
struct trace_kprobe *tk = v;
+ unsigned long nhit = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ nhit += *per_cpu_ptr(tk->nhit, cpu);
seq_printf(m, " %-44s %15lu %15lu\n",
- trace_event_name(&tk->tp.call), tk->nhit,
+ trace_event_name(&tk->tp.call), nhit,
tk->rp.kp.nmissed);
return 0;
@@ -1225,7 +1236,7 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
- tk->nhit++;
+ raw_cpu_inc(*tk->nhit);
if (tk->tp.flags & TP_FLAG_TRACE)
kprobe_trace_func(tk, regs);
@@ -1242,7 +1253,7 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
- tk->nhit++;
+ raw_cpu_inc(*tk->nhit);
if (tk->tp.flags & TP_FLAG_TRACE)
kretprobe_trace_func(tk, ri, regs);
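A minimal sketch of the per-CPU counter pattern adopted above: hot-path increments touch only the local CPU's slot, and the values are folded together only when the statistics are read:

	unsigned long __percpu *nhit = alloc_percpu(unsigned long);
	unsigned long sum = 0;
	int cpu;

	if (!nhit)
		return -ENOMEM;

	/* hot path: kprobe handlers run with preemption disabled,
	 * so the raw (non-preemption-safe) increment suffices */
	raw_cpu_inc(*nhit);

	/* slow path (e.g. seq_file show): fold per-CPU slots into a total */
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(nhit, cpu);

	free_percpu(nhit);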
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 0655afbea83f..d1663083d903 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -186,11 +186,11 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
extern char *__bad_type_size(void);
-#define SYSCALL_FIELD(type, name) \
- sizeof(type) != sizeof(trace.name) ? \
+#define SYSCALL_FIELD(type, field, name) \
+ sizeof(type) != sizeof(trace.field) ? \
__bad_type_size() : \
- #type, #name, offsetof(typeof(trace), name), \
- sizeof(trace.name), is_signed_type(type)
+ #type, #name, offsetof(typeof(trace), field), \
+ sizeof(trace.field), is_signed_type(type)
static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
@@ -261,7 +261,8 @@ static int __init syscall_enter_define_fields(struct trace_event_call *call)
int i;
int offset = offsetof(typeof(trace), args);
- ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
+ ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
+ FILTER_OTHER);
if (ret)
return ret;
@@ -281,11 +282,12 @@ static int __init syscall_exit_define_fields(struct trace_event_call *call)
struct syscall_trace_exit trace;
int ret;
- ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
+ ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
+ FILTER_OTHER);
if (ret)
return ret;
- ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
+ ret = trace_define_field(call, SYSCALL_FIELD(long, ret, ret),
FILTER_OTHER);
return ret;
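To make the three-argument macro concrete: the struct member and the user-visible field name can now differ, so SYSCALL_FIELD(int, nr, __syscall_nr) expands (roughly, ignoring the size-check ternary) to:

	"int", "__syscall_nr",			/* #type, #name: what users see */
	offsetof(typeof(trace), nr),		/* struct member is still 'nr' */
	sizeof(trace.nr), is_signed_type(int)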
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 5a70f6196f57..81dedaab36cc 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -41,6 +41,7 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
break;
return i;
}
+EXPORT_SYMBOL(cpumask_any_but);
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index 4a96473b180f..ee566e8bd1cf 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -85,7 +85,7 @@ $(OUTPUT)%.i: %.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_i_c)
-$(OUTPUT)%.i: %.S FORCE
+$(OUTPUT)%.s: %.S FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_i_c)
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 02db3cdff20f..6b7707270aa3 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -27,7 +27,7 @@ endef
# the rule that uses them - an example for that is the 'bionic'
# feature check. ]
#
-FEATURE_TESTS ?= \
+FEATURE_TESTS_BASIC := \
backtrace \
dwarf \
fortify-source \
@@ -46,6 +46,7 @@ FEATURE_TESTS ?= \
libpython \
libpython-version \
libslang \
+ libcrypto \
libunwind \
pthread-attr-setaffinity-np \
stackprotector-all \
@@ -56,6 +57,25 @@ FEATURE_TESTS ?= \
get_cpuid \
bpf
+# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
+# of all feature tests
+FEATURE_TESTS_EXTRA := \
+ bionic \
+ compile-32 \
+ compile-x32 \
+ cplus-demangle \
+ hello \
+ libbabeltrace \
+ liberty \
+ liberty-z \
+ libunwind-debug-frame
+
+FEATURE_TESTS ?= $(FEATURE_TESTS_BASIC)
+
+ifeq ($(FEATURE_TESTS),all)
+ FEATURE_TESTS := $(FEATURE_TESTS_BASIC) $(FEATURE_TESTS_EXTRA)
+endif
+
FEATURE_DISPLAY ?= \
dwarf \
glibc \
@@ -68,6 +88,7 @@ FEATURE_DISPLAY ?= \
libperl \
libpython \
libslang \
+ libcrypto \
libunwind \
libdw-dwarf-unwind \
zlib \
@@ -100,6 +121,14 @@ ifeq ($(feature-all), 1)
# test-all.c passed - just set all the core feature flags to 1:
#
$(foreach feat,$(FEATURE_TESTS),$(call feature_set,$(feat)))
+ #
+	# test-all.c does not include these tests, so we need to
+	# check them individually here to get proper feature values
+ #
+ $(call feature_check,compile-32)
+ $(call feature_check,compile-x32)
+ $(call feature_check,bionic)
+ $(call feature_check,libbabeltrace)
else
$(foreach feat,$(FEATURE_TESTS),$(call feature_check,$(feat)))
endif
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index bf8f0352264d..c5f4c417428d 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -23,6 +23,7 @@ FILES= \
test-libpython.bin \
test-libpython-version.bin \
test-libslang.bin \
+ test-libcrypto.bin \
test-libunwind.bin \
test-libunwind-debug-frame.bin \
test-pthread-attr-setaffinity-np.bin \
@@ -105,6 +106,9 @@ $(OUTPUT)test-libaudit.bin:
$(OUTPUT)test-libslang.bin:
$(BUILD) -I/usr/include/slang -lslang
+$(OUTPUT)test-libcrypto.bin:
+ $(BUILD) -lcrypto
+
$(OUTPUT)test-gtk2.bin:
$(BUILD) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 81025cade45f..e499a36c1e4a 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -129,6 +129,10 @@
# include "test-bpf.c"
#undef main
+#define main main_test_libcrypto
+# include "test-libcrypto.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -158,6 +162,7 @@ int main(int argc, char *argv[])
main_test_lzma();
main_test_get_cpuid();
main_test_bpf();
+ main_test_libcrypto();
return 0;
}
diff --git a/tools/build/feature/test-compile.c b/tools/build/feature/test-compile.c
index 31dbf45bf99c..c54e6551ae4c 100644
--- a/tools/build/feature/test-compile.c
+++ b/tools/build/feature/test-compile.c
@@ -1,4 +1,6 @@
+#include <stdio.h>
int main(void)
{
+ printf("Hello World!\n");
return 0;
}
diff --git a/tools/build/feature/test-libcrypto.c b/tools/build/feature/test-libcrypto.c
new file mode 100644
index 000000000000..bd79dc7f28d3
--- /dev/null
+++ b/tools/build/feature/test-libcrypto.c
@@ -0,0 +1,17 @@
+#include <openssl/sha.h>
+#include <openssl/md5.h>
+
+int main(void)
+{
+ MD5_CTX context;
+ unsigned char md[MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH];
+ unsigned char dat[] = "12345";
+
+ MD5_Init(&context);
+ MD5_Update(&context, &dat[0], sizeof(dat));
+ MD5_Final(&md[0], &context);
+
+ SHA1(&dat[0], sizeof(dat), &md[0]);
+
+ return 0;
+}
diff --git a/tools/lib/api/Build b/tools/lib/api/Build
index e8b8a23b9bf4..954c644f7ad9 100644
--- a/tools/lib/api/Build
+++ b/tools/lib/api/Build
@@ -1,3 +1,4 @@
libapi-y += fd/
libapi-y += fs/
libapi-y += cpu.o
+libapi-y += debug.o
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index d85904dc9b38..bbc82c614bee 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -18,6 +18,7 @@ LIBFILE = $(OUTPUT)libapi.a
CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
+CFLAGS += -I$(srctree)/tools/lib/api
RM = rm -f
diff --git a/tools/lib/api/debug-internal.h b/tools/lib/api/debug-internal.h
new file mode 100644
index 000000000000..188f7880eafe
--- /dev/null
+++ b/tools/lib/api/debug-internal.h
@@ -0,0 +1,20 @@
+#ifndef __API_DEBUG_INTERNAL_H__
+#define __API_DEBUG_INTERNAL_H__
+
+#include "debug.h"
+
+#define __pr(func, fmt, ...) \
+do { \
+ if ((func)) \
+ (func)("libapi: " fmt, ##__VA_ARGS__); \
+} while (0)
+
+extern libapi_print_fn_t __pr_warning;
+extern libapi_print_fn_t __pr_info;
+extern libapi_print_fn_t __pr_debug;
+
+#define pr_warning(fmt, ...) __pr(__pr_warning, fmt, ##__VA_ARGS__)
+#define pr_info(fmt, ...) __pr(__pr_info, fmt, ##__VA_ARGS__)
+#define pr_debug(fmt, ...) __pr(__pr_debug, fmt, ##__VA_ARGS__)
+
+#endif /* __API_DEBUG_INTERNAL_H__ */
diff --git a/tools/lib/api/debug.c b/tools/lib/api/debug.c
new file mode 100644
index 000000000000..5fa5cf500a1f
--- /dev/null
+++ b/tools/lib/api/debug.c
@@ -0,0 +1,28 @@
+#include <stdio.h>
+#include <stdarg.h>
+#include "debug.h"
+#include "debug-internal.h"
+
+static int __base_pr(const char *format, ...)
+{
+ va_list args;
+ int err;
+
+ va_start(args, format);
+ err = vfprintf(stderr, format, args);
+ va_end(args);
+ return err;
+}
+
+libapi_print_fn_t __pr_warning = __base_pr;
+libapi_print_fn_t __pr_info = __base_pr;
+libapi_print_fn_t __pr_debug;
+
+void libapi_set_print(libapi_print_fn_t warn,
+ libapi_print_fn_t info,
+ libapi_print_fn_t debug)
+{
+ __pr_warning = warn;
+ __pr_info = info;
+ __pr_debug = debug;
+}
diff --git a/tools/lib/api/debug.h b/tools/lib/api/debug.h
new file mode 100644
index 000000000000..a0872f68fc56
--- /dev/null
+++ b/tools/lib/api/debug.h
@@ -0,0 +1,10 @@
+#ifndef __API_DEBUG_H__
+#define __API_DEBUG_H__
+
+typedef int (*libapi_print_fn_t)(const char *, ...);
+
+void libapi_set_print(libapi_print_fn_t warn,
+ libapi_print_fn_t info,
+ libapi_print_fn_t debug);
+
+#endif /* __API_DEBUG_H__ */
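A minimal consumer sketch for the new hooks; since __pr() tests each function pointer, passing NULL silences that message class. The names below are hypothetical:

	#include <stdarg.h>
	#include <stdio.h>
	#include "debug.h"	/* tools/lib/api/debug.h */

	static int tool_warn(const char *fmt, ...)
	{
		va_list ap;
		int n;

		va_start(ap, fmt);
		fputs("mytool: ", stderr);
		n = vfprintf(stderr, fmt, ap);
		va_end(ap);
		return n;
	}

	int main(void)
	{
		/* keep warnings (with our prefix), drop info and debug */
		libapi_set_print(tool_warn, NULL, NULL);
		return 0;
	}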
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index 459599d1b6c4..ef78c22ff44d 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -13,6 +13,7 @@
#include <sys/mount.h>
#include "fs.h"
+#include "debug-internal.h"
#define _STR(x) #x
#define STR(x) _STR(x)
@@ -300,6 +301,56 @@ int filename__read_ull(const char *filename, unsigned long long *value)
return err;
}
+#define STRERR_BUFSIZE 128 /* For the buffer size of strerror_r */
+
+int filename__read_str(const char *filename, char **buf, size_t *sizep)
+{
+ size_t size = 0, alloc_size = 0;
+ void *bf = NULL, *nbf;
+ int fd, n, err = 0;
+ char sbuf[STRERR_BUFSIZE];
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ return -errno;
+
+ do {
+ if (size == alloc_size) {
+ alloc_size += BUFSIZ;
+ nbf = realloc(bf, alloc_size);
+ if (!nbf) {
+ err = -ENOMEM;
+ break;
+ }
+
+ bf = nbf;
+ }
+
+ n = read(fd, bf + size, alloc_size - size);
+ if (n < 0) {
+ if (size) {
+ pr_warning("read failed %d: %s\n", errno,
+ strerror_r(errno, sbuf, sizeof(sbuf)));
+ err = 0;
+ } else
+ err = -errno;
+
+ break;
+ }
+
+ size += n;
+ } while (n > 0);
+
+ if (!err) {
+ *sizep = size;
+ *buf = bf;
+ } else
+ free(bf);
+
+ close(fd);
+ return err;
+}
+
int sysfs__read_ull(const char *entry, unsigned long long *value)
{
char path[PATH_MAX];
@@ -326,6 +377,19 @@ int sysfs__read_int(const char *entry, int *value)
return filename__read_int(path, value);
}
+int sysfs__read_str(const char *entry, char **buf, size_t *sizep)
+{
+ char path[PATH_MAX];
+ const char *sysfs = sysfs__mountpoint();
+
+ if (!sysfs)
+ return -1;
+
+ snprintf(path, sizeof(path), "%s/%s", sysfs, entry);
+
+ return filename__read_str(path, buf, sizep);
+}
+
int sysctl__read_int(const char *sysctl, int *value)
{
char path[PATH_MAX];
diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h
index d024a7f682f6..9f6598098dc5 100644
--- a/tools/lib/api/fs/fs.h
+++ b/tools/lib/api/fs/fs.h
@@ -2,6 +2,7 @@
#define __API_FS__
#include <stdbool.h>
+#include <unistd.h>
/*
* On most systems <limits.h> would have given us this, but not on some systems
@@ -26,8 +27,10 @@ FS(tracefs)
int filename__read_int(const char *filename, int *value);
int filename__read_ull(const char *filename, unsigned long long *value);
+int filename__read_str(const char *filename, char **buf, size_t *sizep);
int sysctl__read_int(const char *sysctl, int *value);
int sysfs__read_int(const char *entry, int *value);
int sysfs__read_ull(const char *entry, unsigned long long *value);
+int sysfs__read_str(const char *entry, char **buf, size_t *sizep);
#endif /* __API_FS__ */
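A hedged usage sketch for the new string readers; the sysfs path is only an example, and the buffer comes back malloc()ed (and not NUL-terminated), so print it by length and free it:

	#include <stdio.h>
	#include <stdlib.h>
	#include "fs.h"		/* tools/lib/api/fs/fs.h */

	int main(void)
	{
		char *buf = NULL;
		size_t size = 0;

		/* path is relative to the detected sysfs mountpoint */
		if (sysfs__read_str("devices/system/cpu/online", &buf, &size) < 0)
			return 1;

		printf("online cpus: %.*s", (int)size, buf);
		free(buf);
		return 0;
	}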
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 8334a5a9d5d7..7e543c3102d4 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -201,6 +201,7 @@ struct bpf_object {
Elf_Data *data;
} *reloc;
int nr_reloc;
+ int maps_shndx;
} efile;
/*
* All loaded bpf_object is linked in a list, which is
@@ -350,6 +351,7 @@ static struct bpf_object *bpf_object__new(const char *path,
*/
obj->efile.obj_buf = obj_buf;
obj->efile.obj_buf_sz = obj_buf_sz;
+ obj->efile.maps_shndx = -1;
obj->loaded = false;
@@ -529,12 +531,12 @@ bpf_object__init_maps(struct bpf_object *obj, void *data,
}
static int
-bpf_object__init_maps_name(struct bpf_object *obj, int maps_shndx)
+bpf_object__init_maps_name(struct bpf_object *obj)
{
int i;
Elf_Data *symbols = obj->efile.symbols;
- if (!symbols || maps_shndx < 0)
+ if (!symbols || obj->efile.maps_shndx < 0)
return -EINVAL;
for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
@@ -544,7 +546,7 @@ bpf_object__init_maps_name(struct bpf_object *obj, int maps_shndx)
if (!gelf_getsym(symbols, i, &sym))
continue;
- if (sym.st_shndx != maps_shndx)
+ if (sym.st_shndx != obj->efile.maps_shndx)
continue;
map_name = elf_strptr(obj->efile.elf,
@@ -572,7 +574,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
Elf *elf = obj->efile.elf;
GElf_Ehdr *ep = &obj->efile.ehdr;
Elf_Scn *scn = NULL;
- int idx = 0, err = 0, maps_shndx = -1;
+ int idx = 0, err = 0;
/* Elf is corrupted/truncated, avoid calling elf_strptr. */
if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
@@ -625,7 +627,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
else if (strcmp(name, "maps") == 0) {
err = bpf_object__init_maps(obj, data->d_buf,
data->d_size);
- maps_shndx = idx;
+ obj->efile.maps_shndx = idx;
} else if (sh.sh_type == SHT_SYMTAB) {
if (obj->efile.symbols) {
pr_warning("bpf: multiple SYMTAB in %s\n",
@@ -674,8 +676,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
pr_warning("Corrupted ELF file: index of strtab invalid\n");
return LIBBPF_ERRNO__FORMAT;
}
- if (maps_shndx >= 0)
- err = bpf_object__init_maps_name(obj, maps_shndx);
+ if (obj->efile.maps_shndx >= 0)
+ err = bpf_object__init_maps_name(obj);
out:
return err;
}
@@ -697,7 +699,8 @@ bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
static int
bpf_program__collect_reloc(struct bpf_program *prog,
size_t nr_maps, GElf_Shdr *shdr,
- Elf_Data *data, Elf_Data *symbols)
+ Elf_Data *data, Elf_Data *symbols,
+ int maps_shndx)
{
int i, nrels;
@@ -724,9 +727,6 @@ bpf_program__collect_reloc(struct bpf_program *prog,
return -LIBBPF_ERRNO__FORMAT;
}
- insn_idx = rel.r_offset / sizeof(struct bpf_insn);
- pr_debug("relocation: insn_idx=%u\n", insn_idx);
-
if (!gelf_getsym(symbols,
GELF_R_SYM(rel.r_info),
&sym)) {
@@ -735,6 +735,15 @@ bpf_program__collect_reloc(struct bpf_program *prog,
return -LIBBPF_ERRNO__FORMAT;
}
+ if (sym.st_shndx != maps_shndx) {
+ pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
+ prog->section_name, sym.st_shndx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+ insn_idx = rel.r_offset / sizeof(struct bpf_insn);
+ pr_debug("relocation: insn_idx=%u\n", insn_idx);
+
if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
insn_idx, insns[insn_idx].code);
@@ -863,7 +872,8 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
err = bpf_program__collect_reloc(prog, nr_maps,
shdr, data,
- obj->efile.symbols);
+ obj->efile.symbols,
+ obj->efile.maps_shndx);
if (err)
return err;
}
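For context, a sketch of the kind of source whose map references generate the relocations being validated; it follows the bpf_map_def/SEC() conventions used in samples/bpf and is hypothetical, not part of the patch:

	#define SEC(name) __attribute__((section(name), used))

	struct bpf_map_def {
		unsigned int type;
		unsigned int key_size;
		unsigned int value_size;
		unsigned int max_entries;
	};

	/* Lands in the ELF "maps" section. Any instruction referencing it
	 * carries a relocation whose symbol section index must now equal
	 * obj->efile.maps_shndx, or bpf_program__collect_reloc() rejects it. */
	struct bpf_map_def SEC("maps") counters = {
		.type		= 2,	/* BPF_MAP_TYPE_ARRAY */
		.key_size	= sizeof(int),
		.value_size	= sizeof(long),
		.max_entries	= 1,
	};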
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index c3bd294a63d1..190cc886ab91 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -1951,6 +1951,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
strcmp(token, "*") == 0 ||
strcmp(token, "^") == 0 ||
strcmp(token, "/") == 0 ||
+ strcmp(token, "%") == 0 ||
strcmp(token, "<") == 0 ||
strcmp(token, ">") == 0 ||
strcmp(token, "<=") == 0 ||
@@ -2397,6 +2398,12 @@ static int arg_num_eval(struct print_arg *arg, long long *val)
break;
*val = left + right;
break;
+ case '~':
+ ret = arg_num_eval(arg->op.right, &right);
+ if (!ret)
+ break;
+ *val = ~right;
+ break;
default:
do_warning("unknown op '%s'", arg->op.op);
ret = 0;
@@ -2634,6 +2641,7 @@ process_hex(struct event_format *event, struct print_arg *arg, char **tok)
free_field:
free_arg(arg->hex.field);
+ arg->hex.field = NULL;
out:
*tok = NULL;
return EVENT_ERROR;
@@ -2658,8 +2666,10 @@ process_int_array(struct event_format *event, struct print_arg *arg, char **tok)
free_size:
free_arg(arg->int_array.count);
+ arg->int_array.count = NULL;
free_field:
free_arg(arg->int_array.field);
+ arg->int_array.field = NULL;
out:
*tok = NULL;
return EVENT_ERROR;
@@ -3689,6 +3699,9 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
case '/':
val = left / right;
break;
+ case '%':
+ val = left % right;
+ break;
case '*':
val = left * right;
break;
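With '%' accepted by both the tokenizer and the evaluators, a print format along these (hypothetical) lines now parses and evaluates:

	print fmt: "bucket=%d", REC->idx % 8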
@@ -4971,7 +4984,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
break;
}
}
- if (pevent->long_size == 8 && ls &&
+ if (pevent->long_size == 8 && ls == 1 &&
sizeof(long) != 8) {
char *p;
@@ -5335,41 +5348,45 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
return false;
}
-void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
- struct pevent_record *record, bool use_trace_clock)
+/**
+ * pevent_find_event_by_record - return the event from a given record
+ * @pevent: a handle to the pevent
+ * @record: The record to get the event from
+ *
+ * Returns the associated event for a given record, or NULL if none
+ * is found.
+ */
+struct event_format *
+pevent_find_event_by_record(struct pevent *pevent, struct pevent_record *record)
{
- static const char *spaces = " "; /* 20 spaces */
- struct event_format *event;
- unsigned long secs;
- unsigned long usecs;
- unsigned long nsecs;
- const char *comm;
- void *data = record->data;
int type;
- int pid;
- int len;
- int p;
- bool use_usec_format;
-
- use_usec_format = is_timestamp_in_us(pevent->trace_clock,
- use_trace_clock);
- if (use_usec_format) {
- secs = record->ts / NSECS_PER_SEC;
- nsecs = record->ts - secs * NSECS_PER_SEC;
- }
if (record->size < 0) {
do_warning("ug! negative record size %d", record->size);
- return;
+ return NULL;
}
- type = trace_parse_common_type(pevent, data);
+ type = trace_parse_common_type(pevent, record->data);
- event = pevent_find_event(pevent, type);
- if (!event) {
- do_warning("ug! no event found for type %d", type);
- return;
- }
+ return pevent_find_event(pevent, type);
+}
+
+/**
+ * pevent_print_event_task - Write the event task comm, pid and CPU
+ * @pevent: a handle to the pevent
+ * @s: the trace_seq to write to
+ * @event: the handle to the record's event
+ * @record: The record to get the event from
+ *
+ * Writes the task's comm, pid and CPU to @s.
+ */
+void pevent_print_event_task(struct pevent *pevent, struct trace_seq *s,
+ struct event_format *event,
+ struct pevent_record *record)
+{
+ void *data = record->data;
+ const char *comm;
+ int pid;
pid = parse_common_pid(pevent, data);
comm = find_cmdline(pevent, pid);
@@ -5377,9 +5394,43 @@ void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
if (pevent->latency_format) {
trace_seq_printf(s, "%8.8s-%-5d %3d",
comm, pid, record->cpu);
- pevent_data_lat_fmt(pevent, s, record);
} else
trace_seq_printf(s, "%16s-%-5d [%03d]", comm, pid, record->cpu);
+}
+
+/**
+ * pevent_print_event_time - Write the event timestamp
+ * @pevent: a handle to the pevent
+ * @s: the trace_seq to write to
+ * @event: the handle to the record's event
+ * @record: The record to get the event from
+ * @use_trace_clock: Set to parse according to the @pevent->trace_clock
+ *
+ * Writes the timestamp of the record into @s.
+ */
+void pevent_print_event_time(struct pevent *pevent, struct trace_seq *s,
+ struct event_format *event,
+ struct pevent_record *record,
+ bool use_trace_clock)
+{
+ unsigned long secs;
+ unsigned long usecs;
+ unsigned long nsecs;
+ int p;
+ bool use_usec_format;
+
+ use_usec_format = is_timestamp_in_us(pevent->trace_clock,
+ use_trace_clock);
+ if (use_usec_format) {
+ secs = record->ts / NSECS_PER_SEC;
+ nsecs = record->ts - secs * NSECS_PER_SEC;
+ }
+
+ if (pevent->latency_format) {
+ trace_seq_printf(s, " %3d", record->cpu);
+ pevent_data_lat_fmt(pevent, s, record);
+ } else
+ trace_seq_printf(s, " [%03d]", record->cpu);
if (use_usec_format) {
if (pevent->flags & PEVENT_NSEC_OUTPUT) {
@@ -5387,14 +5438,36 @@ void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
p = 9;
} else {
usecs = (nsecs + 500) / NSECS_PER_USEC;
+ /* To avoid usecs larger than 1 sec */
+ if (usecs >= 1000000) {
+ usecs -= 1000000;
+ secs++;
+ }
p = 6;
}
- trace_seq_printf(s, " %5lu.%0*lu: %s: ",
- secs, p, usecs, event->name);
+ trace_seq_printf(s, " %5lu.%0*lu:", secs, p, usecs);
} else
- trace_seq_printf(s, " %12llu: %s: ",
- record->ts, event->name);
+ trace_seq_printf(s, " %12llu:", record->ts);
+}
+
+/**
+ * pevent_print_event_data - Write the event data section
+ * @pevent: a handle to the pevent
+ * @s: the trace_seq to write to
+ * @event: the handle to the record's event
+ * @record: The record to get the event from
+ *
+ * Writes the parsing of the record's data to @s.
+ */
+void pevent_print_event_data(struct pevent *pevent, struct trace_seq *s,
+ struct event_format *event,
+ struct pevent_record *record)
+{
+ static const char *spaces = " "; /* 20 spaces */
+ int len;
+
+ trace_seq_printf(s, " %s: ", event->name);
/* Space out the event names evenly. */
len = strlen(event->name);
@@ -5404,6 +5477,23 @@ void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
pevent_event_info(s, event, record);
}
+void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
+ struct pevent_record *record, bool use_trace_clock)
+{
+ struct event_format *event;
+
+ event = pevent_find_event_by_record(pevent, record);
+ if (!event) {
+ do_warning("ug! no event found for type %d",
+ trace_parse_common_type(pevent, record->data));
+ return;
+ }
+
+ pevent_print_event_task(pevent, s, event, record);
+ pevent_print_event_time(pevent, s, event, record, use_trace_clock);
+ pevent_print_event_data(pevent, s, event, record);
+}
+
static int events_id_cmp(const void *a, const void *b)
{
struct event_format * const * ea = a;
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 706d9bc24066..9ffde377e89d 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -628,6 +628,16 @@ int pevent_register_print_string(struct pevent *pevent, const char *fmt,
unsigned long long addr);
int pevent_pid_is_registered(struct pevent *pevent, int pid);
+void pevent_print_event_task(struct pevent *pevent, struct trace_seq *s,
+ struct event_format *event,
+ struct pevent_record *record);
+void pevent_print_event_time(struct pevent *pevent, struct trace_seq *s,
+ struct event_format *event,
+ struct pevent_record *record,
+ bool use_trace_clock);
+void pevent_print_event_data(struct pevent *pevent, struct trace_seq *s,
+ struct event_format *event,
+ struct pevent_record *record);
void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
struct pevent_record *record, bool use_trace_clock);
@@ -694,6 +704,9 @@ struct event_format *pevent_find_event(struct pevent *pevent, int id);
struct event_format *
pevent_find_event_by_name(struct pevent *pevent, const char *sys, const char *name);
+struct event_format *
+pevent_find_event_by_record(struct pevent *pevent, struct pevent_record *record);
+
void pevent_data_lat_fmt(struct pevent *pevent,
struct trace_seq *s, struct pevent_record *record);
int pevent_data_type(struct pevent *pevent, struct pevent_record *rec);
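With the printer split into task/time/data stages, callers can compose only the pieces they need; a hedged sketch (assuming 'pevent', 's' and 'record' are set up as usual):

	struct event_format *event;

	event = pevent_find_event_by_record(pevent, record);
	if (event) {
		/* comm/pid/CPU plus the parsed payload, but no timestamp */
		pevent_print_event_task(pevent, s, event, record);
		pevent_print_event_data(pevent, s, event, record);
	}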
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index b9ca1e304158..15949e2a7805 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -8,7 +8,7 @@ perf-config - Get and set variables in a configuration file.
SYNOPSIS
--------
[verse]
-'perf config' -l | --list
+'perf config' [<file-option>] -l | --list
DESCRIPTION
-----------
@@ -21,6 +21,14 @@ OPTIONS
--list::
Show current config variables, name and value, for all sections.
+--user::
+ For writing and reading options: write to user
+ '$HOME/.perfconfig' file or read it.
+
+--system::
+ For writing and reading options: write to system-wide
+ '$(sysconfdir)/perfconfig' or read it.
+
CONFIGURATION FILE
------------------
@@ -30,6 +38,10 @@ The '$HOME/.perfconfig' file is used to store a per-user configuration.
The file '$(sysconfdir)/perfconfig' can be used to
store a system-wide default configuration.
+When reading or writing, the values are read from the system and user
+configuration files by default, and options '--system' and '--user'
+can be used to tell the command to read from or write to only that location.
+
Syntax
~~~~~~
@@ -62,7 +74,7 @@ Given a $HOME/.perfconfig like this:
medium = green, default
normal = lightgray, default
selected = white, lightgray
- code = blue, default
+ jump_arrows = blue, default
addr = magenta, default
root = white, blue
@@ -98,6 +110,347 @@ Given a $HOME/.perfconfig like this:
order = caller
sort-key = function
+Variables
+~~~~~~~~~
+
+colors.*::
+ The variables for customizing the colors used in the output for the
+ 'report', 'top' and 'annotate' in the TUI. They should specify the
+ foreground and background colors, separated by a comma, for example:
+
+ medium = green, lightgray
+
+	If you want to use the color configured for your terminal, just leave it
+ as 'default', for example:
+
+ medium = default, lightgray
+
+ Available colors:
+ red, yellow, green, cyan, gray, black, blue,
+ white, default, magenta, lightgray
+
+ colors.top::
+		'top' means an overhead percentage of more than 5%.
+		The values of this variable specify the colors used for such percentages.
+		Default values are foreground-color 'red' and
+		background-color 'default'.
+ colors.medium::
+		'medium' means an overhead percentage of more than 0.5%.
+ Default values are 'green' and 'default'.
+ colors.normal::
+		'normal' means the remaining overhead percentages,
+		i.e. those not covered by 'top', 'medium' or 'selected'.
+ Default values are 'lightgray' and 'default'.
+ colors.selected::
+ This selects the colors for the current entry in a list of entries
+ from sub-commands (top, report, annotate).
+ Default values are 'black' and 'lightgray'.
+ colors.jump_arrows::
+ Colors for jump arrows on assembly code listings
+		such as 'jns', 'jmp', 'jne', etc.
+ Default values are 'blue', 'default'.
+ colors.addr::
+ This selects colors for addresses from 'annotate'.
+ Default values are 'magenta', 'default'.
+ colors.root::
+		Colors for headers in the output of sub-commands (top, report).
+ Default values are 'white', 'blue'.
+
+tui.*, gtk.*::
+ Subcommands that can be configured here are 'top', 'report' and 'annotate'.
+ These values are booleans, for example:
+
+ [tui]
+ top = true
+
+	will make the TUI the default for the 'top' subcommand. These UIs are
+	only available if the required libs were detected at tool build time.
+
+buildid.*::
+ buildid.dir::
+ Each executable and shared library in modern distributions comes with a
+		content-based identifier that, if available, will be inserted in a
+		'perf.data' file header so that, at analysis time, whatever is needed
+		for symbol resolution, code annotation, etc. can be found.
+
+		The recording tools also store a hard link or copy in a per-user
+ directory, $HOME/.debug/, of binaries, shared libraries, /proc/kallsyms
+ and /proc/kcore files to be used at analysis time.
+
+ The buildid.dir variable can be used to either change this directory
+ cache location, or to disable it altogether. If you want to disable it,
+		set buildid.dir to /dev/null. The default is $HOME/.debug.
+
+annotate.*::
+	These options work only in the TUI.
+	They control how addresses, jump arrows and source code are shown
+	alongside the assembly listing of a program.
+
+ annotate.hide_src_code::
+		If the analyzed program has source code available,
+		this option controls whether 'annotate' prints the assembly
+		together with that source code.
+		For example, take a part of a program with four lines of assembly.
+		If this option is 'true', they are printed
+		without the source code, as below.
+
+ │ push %rbp
+ │ mov %rsp,%rbp
+ │ sub $0x10,%rsp
+ │ mov (%rdi),%rdx
+
+		But if this option is 'false', the source code of that part
+		is also printed, as below. Default is 'false'.
+
+ │ struct rb_node *rb_next(const struct rb_node *node)
+ │ {
+ │ push %rbp
+ │ mov %rsp,%rbp
+ │ sub $0x10,%rsp
+ │ struct rb_node *parent;
+ │
+ │ if (RB_EMPTY_NODE(node))
+ │ mov (%rdi),%rdx
+ │ return n;
+
+ annotate.use_offset::
+		Addresses can be shown as offsets from the first address of the
+		loaded function instead of as the original absolute addresses
+		of the assembly code. To illustrate:
+		if the base address is 0xffffffff81624d50, as below,
+
+ ffffffff81624d50 <load0>
+
+		a line of assembly code normally shows its absolute address, as below,
+
+ ffffffff816250b8:│ mov 0x8(%r14),%rdi
+
+		but if use_offset is 'true', the offset from the base address is printed instead.
+		Default is 'true'. This option only applies to the TUI.
+
+ 368:│ mov 0x8(%r14),%rdi
+
+ annotate.jump_arrows::
+		Assembly code may contain jump instructions.
+		Depending on the boolean value of jump_arrows,
+		arrows representing where each instruction jumps to
+		can be printed or not, as below.
+
+ │ ┌──jmp 1333
+ │ │ xchg %ax,%ax
+ │1330:│ mov %r15,%r10
+ │1333:└─→cmp %r15,%r14
+
+		If jump_arrows is 'false', the arrows aren't printed, as below.
+		Default is 'false'.
+
+ │ ↓ jmp 1333
+ │ xchg %ax,%ax
+ │1330: mov %r15,%r10
+ │1333: cmp %r15,%r14
+
+ annotate.show_linenr::
+		When showing source code, if this option is 'true',
+		line numbers are printed, as below.
+
+ │1628 if (type & PERF_SAMPLE_IDENTIFIER) {
+ │ ↓ jne 508
+ │1628 data->id = *array;
+ │1629 array++;
+ │1630 }
+
+		However, if this option is 'false', they aren't printed, as below.
+		Default is 'false'.
+
+ │ if (type & PERF_SAMPLE_IDENTIFIER) {
+ │ ↓ jne 508
+ │ data->id = *array;
+ │ array++;
+ │ }
+
+ annotate.show_nr_jumps::
+ Let's see a part of assembly code.
+
+ │1382: movb $0x1,-0x270(%rbp)
+
+		If this option is 'true', the number of branches jumping to that address is printed, as below.
+		Default is 'false'.
+
+ │1 1382: movb $0x1,-0x270(%rbp)
+
+ annotate.show_total_period::
+		To compare two records on a per-instruction basis, this option
+		displays the total number of samples that belong to a line of
+		assembly code. If this option is 'true', total periods are printed
+		instead of percent values, as below.
+
+ 302 │ mov %eax,%eax
+
+		But if this option is 'false', percent values for overhead are
+		printed instead, as below. Default is 'false'.
+
+ 99.93 │ mov %eax,%eax
+
+hist.*::
+ hist.percentage::
+		This option controls how the overhead of filtered entries is
+		calculated; that is, it takes effect only if there's a
+		filter (by comm, dso or symbol name). Suppose the following example:
+
+ Overhead Symbols
+ ........ .......
+ 33.33% foo
+ 33.33% bar
+ 33.33% baz
+
+		This is the original overhead, and we'll filter out the first 'foo'
+		entry. The value 'relative' would increase the overhead of 'bar'
+		and 'baz' to 50.00% each, while 'absolute' would keep their
+		current overhead (33.33%).
+
+ui.*::
+ ui.show-headers::
+ This option controls display of column headers (like 'Overhead' and 'Symbol')
+ in 'report' and 'top'. If this option is false, they are hidden.
+ This option is only applied to TUI.
+
+call-graph.*::
+	When the sub-commands 'top' and 'report' are used with -g/--children,
+	the following options control the call graph.
+
+ call-graph.record-mode::
+		The record-mode can be 'fp' (frame pointer), 'dwarf' or 'lbr'.
+		The value 'dwarf' is effective only if perf detects the needed
+		library (libunwind or a recent version of libdw).
+		'lbr' only works for CPUs that support it.
+
+ call-graph.dump-size::
+		The size of stack to dump in order to do post-unwinding. Default is 8192 (bytes).
+		When record-mode is 'dwarf', this default size is used if the option is omitted.
+
+ call-graph.print-type::
+		The print-types can be graph (graph absolute), fractal (graph relative),
+		flat and folded. This option controls the way overhead is shown for each
+		callchain entry. Suppose the following example:
+
+ Overhead Symbols
+ ........ .......
+ 40.00% foo
+ |
+ ---foo
+ |
+ |--50.00%--bar
+ | main
+ |
+ --50.00%--baz
+ main
+
+		This output is in 'fractal' format. 'foo' came from 'bar' and 'baz' exactly
+		half and half, so 'fractal' shows 50.00% for each
+		(that is, it treats the overhead of 'foo' as the 100% total).
+
+		The 'graph' type uses the absolute overhead value of 'foo' as the total,
+		so each of the 'bar' and 'baz' callchains will have 20.00% of overhead.
+		If 'flat' is used, call chains are shown in a single column with linear
+		exposure. 'folded' means call chains are displayed on one line,
+		separated by semicolons.
+
+ call-graph.order::
+ This option controls print order of callchains. The default is
+ 'callee' which means callee is printed at top and then followed by its
+ caller and so on. The 'caller' prints it in reverse order.
+
+ If this option is not set and report.children or top.children is
+ set to true (or the equivalent command line option is given),
+ the default value of this option is changed to 'caller' for the
+ execution of 'perf report' or 'perf top'. Other commands will
+ still default to 'callee'.
+
+ call-graph.sort-key::
+		Callchains are merged if they contain the same information.
+		The sort-key option determines how callchains are compared.
+		The value of 'sort-key' can be 'function' or 'address'.
+		The default is 'function'.
+
+ call-graph.threshold::
+		When there are many callchains, the output can become huge. So perf
+		omits small callchains below a certain overhead (threshold), and this
+		option controls the threshold. Default is 0.5 (%). How the overhead
+		is calculated depends on call-graph.print-type.
+
+ call-graph.print-limit::
+ This is a maximum number of lines of callchain printed for a single
+ histogram entry. Default is 0 which means no limitation.
+
+report.*::
+ report.percent-limit::
+ This one is mostly the same as call-graph.threshold but works for
+ histogram entries. Entries having an overhead lower than this
+ percentage will not be printed. Default is '0'. If percent-limit
+ is '10', only entries which have more than 10% of overhead will be
+ printed.
+
+ report.queue-size::
+ This option sets up the maximum allocation size of the internal
+ event queue for ordering events. Default is 0, meaning no limit.
+
+ report.children::
+ 'Children' means functions called from another function.
+		If this option is true, 'perf report' accumulates callchains of children
+		and shows the (accumulated) total overhead as well as the 'Self' overhead.
+		Please refer to the 'perf report' manual. The default is 'true'.
+
+ report.group::
+		This option shows event group information together.
+		Example output with this turned on; notice that there is one column
+		per event in the group, ref-cycles and cycles:
+
+ # group: {ref-cycles,cycles}
+ # ========
+ #
+ # Samples: 7K of event 'anon group { ref-cycles, cycles }'
+ # Event count (approx.): 6876107743
+ #
+ # Overhead Command Shared Object Symbol
+ # ................ ....... ................. ...................
+ #
+ 99.84% 99.76% noploop noploop [.] main
+ 0.07% 0.00% noploop ld-2.15.so [.] strcmp
+ 0.03% 0.00% noploop [kernel.kallsyms] [k] timerqueue_del
+
+top.*::
+ top.children::
+ Same as 'report.children'. If it is enabled, the output of the 'top'
+ command will have a 'Children' overhead column as well as a 'Self'
+ overhead column by default.
+ The default is 'true'.
+
+man.*::
+ man.viewer::
+ This option assigns a tool to view manual pages when the 'help'
+ subcommand is invoked. Supported tools are 'man', 'woman'
+ (with emacs client) and 'konqueror'. Default is 'man'.
+
+ A new man viewer tool can also be added using 'man.<tool>.cmd',
+ or a different path can be used via the 'man.<tool>.path' config option.
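+
+ An illustrative sketch selecting one of the supported viewers:
+
+    [man]
+        viewer = woman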
+
+pager.*::
+ pager.<subcommand>::
+ When the subcommand is run on stdio, this value determines whether
+ it uses a pager or not. Default is 'unspecified'.
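+
+ For example, to turn the pager off for 'perf report' (an illustrative
+ sketch; 'report' stands in for any subcommand name):
+
+    [pager]
+        report = false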
+
+kmem.*::
+ kmem.default::
+ This option decides which allocator is to be analyzed if neither the
+ '--slab' nor the '--page' option is used. Default is 'slab'.
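+
+ For instance, to analyze the page allocator by default (illustrative):
+
+    [kmem]
+        default = page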
+
+record.*::
+ record.build-id::
+ This option can be 'cache', 'no-cache' or 'skip'.
+ 'cache' post-processes the data and saves/updates the binaries in
+ the build-id cache (in ~/.debug). This is the default.
+ 'no-cache' post-processes the data but does not update the build-id cache.
+ 'skip' skips post-processing and does not update the cache.
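+
+ As a sketch, skipping build-id post-processing entirely could be
+ configured with:
+
+    [record]
+        build-id = skip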
+
SEE ALSO
--------
linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt
index 0b1cedeef895..87b2588d1cbd 100644
--- a/tools/perf/Documentation/perf-inject.txt
+++ b/tools/perf/Documentation/perf-inject.txt
@@ -53,6 +53,13 @@ include::itrace.txt[]
--strip::
Use with --itrace to strip out non-synthesized events.
+-j::
+--jit::
+ Process jitdump files by injecting the mmap records corresponding to jitted
+ functions. This option also generates the ELF images for each jitted function
+ found in the jitdump files captured in the input perf.data file. Use this option
+ if you are monitoring an environment using JIT runtimes, such as Java, DART or V8.
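+
+ As an illustrative sketch (file names are examples), a recording from a
+ JIT-enabled runtime might be post-processed and then reported with:
+
+    perf inject --jit -i perf.data -o perf.data.jitted
+    perf report -i perf.data.jitted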
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1]
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index fbceb631387c..19aa17532a16 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -341,6 +341,12 @@ Specify vmlinux path which has debuginfo.
--buildid-all::
Record build-id of all DSOs regardless of whether they are actually hit or not.
+--all-kernel::
+Configure all used events to run in kernel space.
+
+--all-user::
+Configure all used events to run in user space.
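+
+For example (an illustrative command; './workload' is a placeholder):
+
+    perf record --all-user -e cycles -- ./workload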
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 8a301f6afb37..12113992ac9d 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -117,6 +117,22 @@ OPTIONS
And default sort keys are changed to comm, dso_from, symbol_from, dso_to
and symbol_to, see '--branch-stack'.
+ If the --mem-mode option is used, the following sort keys are also available
+ (incompatible with --branch-stack):
+ symbol_daddr, dso_daddr, locked, tlb, mem, snoop, dcacheline.
+
+ - symbol_daddr: name of data symbol being executed on at the time of the sample
+ - dso_daddr: name of library or module containing the data being executed
+ on at the time of the sample
+ - locked: whether the bus was locked at the time of the sample
+ - tlb: type of tlb access for the data at the time of the sample
+ - mem: type of memory access for the data at the time of the sample
+ - snoop: type of snoop (if any) for the data at the time of the sample
+ - dcacheline: the cacheline the data address is on at the time of the sample
+
+ And the default sort keys are changed to local_weight, mem, sym, dso,
+ symbol_daddr, dso_daddr, snoop, tlb, locked, see '--mem-mode'.
+
If the data file has tracepoint event(s), following (dynamic) sort keys
are also available:
trace, trace_fields, [<event>.]<field>[/raw]
@@ -151,22 +167,6 @@ OPTIONS
By default, every sort keys not specified in -F will be appended
automatically.
- If --mem-mode option is used, following sort keys are also available
- (incompatible with --branch-stack):
- symbol_daddr, dso_daddr, locked, tlb, mem, snoop, dcacheline.
-
- - symbol_daddr: name of data symbol being executed on at the time of sample
- - dso_daddr: name of library or module containing the data being executed
- on at the time of sample
- - locked: whether the bus was locked at the time of sample
- - tlb: type of tlb access for the data at the time of sample
- - mem: type of memory access for the data at the time of sample
- - snoop: type of snoop (if any) for the data at the time of sample
- - dcacheline: the cacheline the data address is on at the time of sample
-
- And default sort keys are changed to local_weight, mem, sym, dso,
- symbol_daddr, dso_daddr, snoop, tlb, locked, see '--mem-mode'.
-
-p::
--parent=<regex>::
A regex filter to identify parent. The parent is a caller of this
@@ -351,7 +351,10 @@ OPTIONS
--percent-limit::
Do not show entries which have an overhead under that percent.
- (Default: 0).
+ (Default: 0). Note that this option also sets the percent limit (threshold)
+ of callchains. However, the default value of the callchain threshold is
+ different from the default value for hist entries. Please see the
+ --call-graph option for details.
--percentage::
Determine how to display the overhead percentage of filtered entries.
@@ -398,6 +401,9 @@ include::itrace.txt[]
--raw-trace::
When displaying traceevent output, do not use print fmt or plugins.
+--hierarchy::
+ Enable hierarchical output.
+
include::callchain-overhead-calculation.txt[]
SEE ALSO
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 52ef7a9d50aa..04f23b404bbc 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -69,6 +69,14 @@ report::
--scale::
scale/normalize counter values
+-d::
+--detailed::
+ print more detailed statistics, can be specified up to 3 times
+
+ -d: detailed events, L1 and LLC data cache
+ -d -d: more detailed events, dTLB and iTLB events
+ -d -d -d: very detailed events, adding prefetch events
+
-r::
--repeat=<n>::
repeat command and print average + stddev (max: 100). 0 means forever.
@@ -139,6 +147,10 @@ Print count deltas every N milliseconds (minimum: 10ms)
The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution.
example: 'perf stat -I 1000 -e cycles -a sleep 5'
+--metric-only::
+Only print computed metrics. Print them in a single line.
+Don't show any raw values. Not supported with --per-thread.
+
--per-socket::
Aggregate counts per processor socket for system-wide mode measurements. This
is a useful mode to detect imbalance between sockets. To enable this mode,
@@ -211,6 +223,29 @@ $ perf stat -- make -j
Wall-clock time elapsed: 719.554352 msecs
+CSV FORMAT
+----------
+
+With -x, perf stat is able to output a not-quite-CSV format: commas in
+the output are not put into "", i.e. fields are not quoted. To make the
+output easy to parse, it is recommended to use a different delimiter
+character, like -x \;
+
+The fields are in this order:
+
+ - optional usec time stamp in fractions of a second (with -I xxx)
+ - optional CPU, core, or socket identifier
+ - optional number of logical CPUs aggregated
+ - counter value
+ - unit of the counter value or empty
+ - event name
+ - run time of counter
+ - percentage of measurement time the counter was running
+ - optional variance if multiple values are collected with -r
+ - optional metric value
+ - optional unit of metric
+
+Additional metrics may be printed with all earlier fields being empty.
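+
+For instance, with -x \; and no -I or aggregation options, an illustrative
+(made-up) line for the cycles counter would have the form:
+
+    3086775;;cycles;937282;100.00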
+
SEE ALSO
--------
linkperf:perf-top[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index b0e60e17db38..19f046f027cd 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -233,6 +233,9 @@ Default is to monitor all CPUS.
--raw-trace::
When displaying traceevent output, do not use print fmt or plugins.
+--hierarchy::
+ Enable hierarchical output.
+
INTERACTIVE PROMPTING KEYS
--------------------------
diff --git a/tools/perf/Documentation/perfconfig.example b/tools/perf/Documentation/perfconfig.example
index 767ea2436e1c..1d8d5bc4cd2d 100644
--- a/tools/perf/Documentation/perfconfig.example
+++ b/tools/perf/Documentation/perfconfig.example
@@ -5,7 +5,7 @@
medium = green, lightgray
normal = black, lightgray
selected = lightgray, magenta
- code = blue, lightgray
+ jump_arrows = blue, lightgray
addr = magenta, lightgray
[tui]
diff --git a/tools/perf/Documentation/tips.txt b/tools/perf/Documentation/tips.txt
index e0ce9573b79b..5950b5a24efd 100644
--- a/tools/perf/Documentation/tips.txt
+++ b/tools/perf/Documentation/tips.txt
@@ -27,3 +27,4 @@ Skip collecting build-id when recording: perf record -B
To change sampling frequency to 100 Hz: perf record -F 100
See assembly instructions with percentage: perf annotate <symbol>
If you prefer Intel style assembly, try: perf annotate -M intel
+For hierarchical output, try: perf report --hierarchy
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index dcd9a70c7193..32a64e619028 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -68,6 +68,20 @@ all tags TAGS:
$(print_msg)
$(make)
+ifdef MAKECMDGOALS
+has_clean := 0
+ifneq ($(filter clean,$(MAKECMDGOALS)),)
+ has_clean := 1
+endif # clean
+
+ifeq ($(has_clean),1)
+ rest := $(filter-out clean,$(MAKECMDGOALS))
+ ifneq ($(rest),)
+$(rest): clean
+ endif # rest
+endif # has_clean
+endif # MAKECMDGOALS
+
#
# The clean target is not really parallel, don't print the jobs info:
#
@@ -75,10 +89,17 @@ clean:
$(make)
#
-# The build-test target is not really parallel, don't print the jobs info:
+# The build-test target is not really parallel, don't print the jobs info,
+# it also uses only the tests/make targets that don't pollute the source
+# repository, i.e. those that use O= or build the tarpkg outside the source
+# repo directories.
+#
+# For a full test, use:
+#
+# make -C tools/perf -f tests/make
#
build-test:
- @$(MAKE) SHUF=1 -f tests/make --no-print-directory
+ @$(MAKE) SHUF=1 -f tests/make REUSE_FEATURES_DUMP=1 MK=Makefile SET_PARALLEL=1 --no-print-directory tarpkg out
#
# All other targets get passed through:
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 5d34815c7ccb..4a4fad4182f5 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -58,6 +58,9 @@ include config/utilities.mak
#
# Define NO_LIBBIONIC if you do not want bionic support
#
+# Define NO_LIBCRYPTO if you do not want libcrypto (openssl) support
+# used for generating build-ids for ELFs generated by jitdump.
+#
# Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support
# for dwarf backtrace post unwind.
#
@@ -136,6 +139,8 @@ $(call allow-override,CC,$(CROSS_COMPILE)gcc)
$(call allow-override,AR,$(CROSS_COMPILE)ar)
$(call allow-override,LD,$(CROSS_COMPILE)ld)
+LD += $(EXTRA_LDFLAGS)
+
PKG_CONFIG = $(CROSS_COMPILE)pkg-config
RM = rm -f
@@ -165,7 +170,16 @@ ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
endif
endif
+# Set FEATURE_TESTS to 'all' so all possible feature checkers are executed.
+# Without this setting the output feature dump file misses some features, for
+# example, liberty. Select all checkers so we won't get an incomplete feature
+# dump file.
ifeq ($(config),1)
+ifdef MAKECMDGOALS
+ifeq ($(filter feature-dump,$(MAKECMDGOALS)),feature-dump)
+FEATURE_TESTS := all
+endif
+endif
include config/Makefile
endif
@@ -618,7 +632,7 @@ clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32
$(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
$(OUTPUT)util/intel-pt-decoder/inat-tables.c $(OUTPUT)fixdep \
- $(OUTPUT)tests/llvm-src-{base,kbuild,prologue}.c
+ $(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
$(python-clean)
diff --git a/tools/perf/arch/arm/Makefile b/tools/perf/arch/arm/Makefile
index 7fbca175099e..18b13518d8d8 100644
--- a/tools/perf/arch/arm/Makefile
+++ b/tools/perf/arch/arm/Makefile
@@ -1,3 +1,4 @@
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
+PERF_HAVE_JITDUMP := 1
diff --git a/tools/perf/arch/arm64/Makefile b/tools/perf/arch/arm64/Makefile
index 7fbca175099e..18b13518d8d8 100644
--- a/tools/perf/arch/arm64/Makefile
+++ b/tools/perf/arch/arm64/Makefile
@@ -1,3 +1,4 @@
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
+PERF_HAVE_JITDUMP := 1
diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile
index 7fbca175099e..56e05f126ad8 100644
--- a/tools/perf/arch/powerpc/Makefile
+++ b/tools/perf/arch/powerpc/Makefile
@@ -1,3 +1,6 @@
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
+
+HAVE_KVM_STAT_SUPPORT := 1
+PERF_HAVE_JITDUMP := 1
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index 7b8b0d1a1b62..c8fe2074d217 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -1,5 +1,6 @@
libperf-y += header.o
libperf-y += sym-handling.o
+libperf-y += kvm-stat.o
libperf-$(CONFIG_DWARF) += dwarf-regs.o
libperf-$(CONFIG_DWARF) += skip-callchain-idx.o
diff --git a/tools/perf/arch/powerpc/util/book3s_hcalls.h b/tools/perf/arch/powerpc/util/book3s_hcalls.h
new file mode 100644
index 000000000000..0dd6b7f2d44f
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/book3s_hcalls.h
@@ -0,0 +1,123 @@
+#ifndef ARCH_PERF_BOOK3S_HV_HCALLS_H
+#define ARCH_PERF_BOOK3S_HV_HCALLS_H
+
+/*
+ * PowerPC HCALL codes : hcall code to name mapping
+ */
+#define kvm_trace_symbol_hcall \
+ {0x4, "H_REMOVE"}, \
+ {0x8, "H_ENTER"}, \
+ {0xc, "H_READ"}, \
+ {0x10, "H_CLEAR_MOD"}, \
+ {0x14, "H_CLEAR_REF"}, \
+ {0x18, "H_PROTECT"}, \
+ {0x1c, "H_GET_TCE"}, \
+ {0x20, "H_PUT_TCE"}, \
+ {0x24, "H_SET_SPRG0"}, \
+ {0x28, "H_SET_DABR"}, \
+ {0x2c, "H_PAGE_INIT"}, \
+ {0x30, "H_SET_ASR"}, \
+ {0x34, "H_ASR_ON"}, \
+ {0x38, "H_ASR_OFF"}, \
+ {0x3c, "H_LOGICAL_CI_LOAD"}, \
+ {0x40, "H_LOGICAL_CI_STORE"}, \
+ {0x44, "H_LOGICAL_CACHE_LOAD"}, \
+ {0x48, "H_LOGICAL_CACHE_STORE"}, \
+ {0x4c, "H_LOGICAL_ICBI"}, \
+ {0x50, "H_LOGICAL_DCBF"}, \
+ {0x54, "H_GET_TERM_CHAR"}, \
+ {0x58, "H_PUT_TERM_CHAR"}, \
+ {0x5c, "H_REAL_TO_LOGICAL"}, \
+ {0x60, "H_HYPERVISOR_DATA"}, \
+ {0x64, "H_EOI"}, \
+ {0x68, "H_CPPR"}, \
+ {0x6c, "H_IPI"}, \
+ {0x70, "H_IPOLL"}, \
+ {0x74, "H_XIRR"}, \
+ {0x78, "H_MIGRATE_DMA"}, \
+ {0x7c, "H_PERFMON"}, \
+ {0xdc, "H_REGISTER_VPA"}, \
+ {0xe0, "H_CEDE"}, \
+ {0xe4, "H_CONFER"}, \
+ {0xe8, "H_PROD"}, \
+ {0xec, "H_GET_PPP"}, \
+ {0xf0, "H_SET_PPP"}, \
+ {0xf4, "H_PURR"}, \
+ {0xf8, "H_PIC"}, \
+ {0xfc, "H_REG_CRQ"}, \
+ {0x100, "H_FREE_CRQ"}, \
+ {0x104, "H_VIO_SIGNAL"}, \
+ {0x108, "H_SEND_CRQ"}, \
+ {0x110, "H_COPY_RDMA"}, \
+ {0x114, "H_REGISTER_LOGICAL_LAN"}, \
+ {0x118, "H_FREE_LOGICAL_LAN"}, \
+ {0x11c, "H_ADD_LOGICAL_LAN_BUFFER"}, \
+ {0x120, "H_SEND_LOGICAL_LAN"}, \
+ {0x124, "H_BULK_REMOVE"}, \
+ {0x130, "H_MULTICAST_CTRL"}, \
+ {0x134, "H_SET_XDABR"}, \
+ {0x138, "H_STUFF_TCE"}, \
+ {0x13c, "H_PUT_TCE_INDIRECT"}, \
+ {0x14c, "H_CHANGE_LOGICAL_LAN_MAC"}, \
+ {0x150, "H_VTERM_PARTNER_INFO"}, \
+ {0x154, "H_REGISTER_VTERM"}, \
+ {0x158, "H_FREE_VTERM"}, \
+ {0x15c, "H_RESET_EVENTS"}, \
+ {0x160, "H_ALLOC_RESOURCE"}, \
+ {0x164, "H_FREE_RESOURCE"}, \
+ {0x168, "H_MODIFY_QP"}, \
+ {0x16c, "H_QUERY_QP"}, \
+ {0x170, "H_REREGISTER_PMR"}, \
+ {0x174, "H_REGISTER_SMR"}, \
+ {0x178, "H_QUERY_MR"}, \
+ {0x17c, "H_QUERY_MW"}, \
+ {0x180, "H_QUERY_HCA"}, \
+ {0x184, "H_QUERY_PORT"}, \
+ {0x188, "H_MODIFY_PORT"}, \
+ {0x18c, "H_DEFINE_AQP1"}, \
+ {0x190, "H_GET_TRACE_BUFFER"}, \
+ {0x194, "H_DEFINE_AQP0"}, \
+ {0x198, "H_RESIZE_MR"}, \
+ {0x19c, "H_ATTACH_MCQP"}, \
+ {0x1a0, "H_DETACH_MCQP"}, \
+ {0x1a4, "H_CREATE_RPT"}, \
+ {0x1a8, "H_REMOVE_RPT"}, \
+ {0x1ac, "H_REGISTER_RPAGES"}, \
+ {0x1b0, "H_DISABLE_AND_GETC"}, \
+ {0x1b4, "H_ERROR_DATA"}, \
+ {0x1b8, "H_GET_HCA_INFO"}, \
+ {0x1bc, "H_GET_PERF_COUNT"}, \
+ {0x1c0, "H_MANAGE_TRACE"}, \
+ {0x1d4, "H_FREE_LOGICAL_LAN_BUFFER"}, \
+ {0x1d8, "H_POLL_PENDING"}, \
+ {0x1e4, "H_QUERY_INT_STATE"}, \
+ {0x244, "H_ILLAN_ATTRIBUTES"}, \
+ {0x250, "H_MODIFY_HEA_QP"}, \
+ {0x254, "H_QUERY_HEA_QP"}, \
+ {0x258, "H_QUERY_HEA"}, \
+ {0x25c, "H_QUERY_HEA_PORT"}, \
+ {0x260, "H_MODIFY_HEA_PORT"}, \
+ {0x264, "H_REG_BCMC"}, \
+ {0x268, "H_DEREG_BCMC"}, \
+ {0x26c, "H_REGISTER_HEA_RPAGES"}, \
+ {0x270, "H_DISABLE_AND_GET_HEA"}, \
+ {0x274, "H_GET_HEA_INFO"}, \
+ {0x278, "H_ALLOC_HEA_RESOURCE"}, \
+ {0x284, "H_ADD_CONN"}, \
+ {0x288, "H_DEL_CONN"}, \
+ {0x298, "H_JOIN"}, \
+ {0x2a4, "H_VASI_STATE"}, \
+ {0x2b0, "H_ENABLE_CRQ"}, \
+ {0x2b8, "H_GET_EM_PARMS"}, \
+ {0x2d0, "H_SET_MPP"}, \
+ {0x2d4, "H_GET_MPP"}, \
+ {0x2ec, "H_HOME_NODE_ASSOCIATIVITY"}, \
+ {0x2f4, "H_BEST_ENERGY"}, \
+ {0x2fc, "H_XIRR_X"}, \
+ {0x300, "H_RANDOM"}, \
+ {0x304, "H_COP"}, \
+ {0x314, "H_GET_MPP_X"}, \
+ {0x31c, "H_SET_MODE"}, \
+ {0xf000, "H_RTAS"} \
+
+#endif
diff --git a/tools/perf/arch/powerpc/util/book3s_hv_exits.h b/tools/perf/arch/powerpc/util/book3s_hv_exits.h
new file mode 100644
index 000000000000..e68ba2da8970
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/book3s_hv_exits.h
@@ -0,0 +1,33 @@
+#ifndef ARCH_PERF_BOOK3S_HV_EXITS_H
+#define ARCH_PERF_BOOK3S_HV_EXITS_H
+
+/*
+ * PowerPC Interrupt vectors : exit code to name mapping
+ */
+
+#define kvm_trace_symbol_exit \
+ {0x0, "RETURN_TO_HOST"}, \
+ {0x100, "SYSTEM_RESET"}, \
+ {0x200, "MACHINE_CHECK"}, \
+ {0x300, "DATA_STORAGE"}, \
+ {0x380, "DATA_SEGMENT"}, \
+ {0x400, "INST_STORAGE"}, \
+ {0x480, "INST_SEGMENT"}, \
+ {0x500, "EXTERNAL"}, \
+ {0x501, "EXTERNAL_LEVEL"}, \
+ {0x502, "EXTERNAL_HV"}, \
+ {0x600, "ALIGNMENT"}, \
+ {0x700, "PROGRAM"}, \
+ {0x800, "FP_UNAVAIL"}, \
+ {0x900, "DECREMENTER"}, \
+ {0x980, "HV_DECREMENTER"}, \
+ {0xc00, "SYSCALL"}, \
+ {0xd00, "TRACE"}, \
+ {0xe00, "H_DATA_STORAGE"}, \
+ {0xe20, "H_INST_STORAGE"}, \
+ {0xe40, "H_EMUL_ASSIST"}, \
+ {0xf00, "PERFMON"}, \
+ {0xf20, "ALTIVEC"}, \
+ {0xf40, "VSX"}
+
+#endif
diff --git a/tools/perf/arch/powerpc/util/kvm-stat.c b/tools/perf/arch/powerpc/util/kvm-stat.c
new file mode 100644
index 000000000000..74eee30398f8
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/kvm-stat.c
@@ -0,0 +1,170 @@
+#include "util/kvm-stat.h"
+#include "util/parse-events.h"
+#include "util/debug.h"
+
+#include "book3s_hv_exits.h"
+#include "book3s_hcalls.h"
+
+#define NR_TPS 4
+
+const char *vcpu_id_str = "vcpu_id";
+const int decode_str_len = 40;
+const char *kvm_entry_trace = "kvm_hv:kvm_guest_enter";
+const char *kvm_exit_trace = "kvm_hv:kvm_guest_exit";
+
+define_exit_reasons_table(hv_exit_reasons, kvm_trace_symbol_exit);
+define_exit_reasons_table(hcall_reasons, kvm_trace_symbol_hcall);
+
+/* Tracepoints specific to ppc_book3s_hv */
+const char *ppc_book3s_hv_kvm_tp[] = {
+ "kvm_hv:kvm_guest_enter",
+ "kvm_hv:kvm_guest_exit",
+ "kvm_hv:kvm_hcall_enter",
+ "kvm_hv:kvm_hcall_exit",
+ NULL,
+};
+
+/* 1 extra placeholder for NULL */
+const char *kvm_events_tp[NR_TPS + 1];
+const char *kvm_exit_reason;
+
+static void hcall_event_get_key(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->info = 0;
+ key->key = perf_evsel__intval(evsel, sample, "req");
+}
+
+static const char *get_hcall_exit_reason(u64 exit_code)
+{
+ struct exit_reasons_table *tbl = hcall_reasons;
+
+ while (tbl->reason != NULL) {
+ if (tbl->exit_code == exit_code)
+ return tbl->reason;
+ tbl++;
+ }
+
+ pr_debug("Unknown hcall code: %lld\n",
+ (unsigned long long)exit_code);
+ return "UNKNOWN";
+}
+
+static bool hcall_event_end(struct perf_evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return (!strcmp(evsel->name, kvm_events_tp[3]));
+}
+
+static bool hcall_event_begin(struct perf_evsel *evsel,
+ struct perf_sample *sample, struct event_key *key)
+{
+ if (!strcmp(evsel->name, kvm_events_tp[2])) {
+ hcall_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+static void hcall_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+ struct event_key *key,
+ char *decode)
+{
+ const char *hcall_reason = get_hcall_exit_reason(key->key);
+
+ scnprintf(decode, decode_str_len, "%s", hcall_reason);
+}
+
+static struct kvm_events_ops hcall_events = {
+ .is_begin_event = hcall_event_begin,
+ .is_end_event = hcall_event_end,
+ .decode_key = hcall_event_decode_key,
+ .name = "HCALL-EVENT",
+};
+
+static struct kvm_events_ops exit_events = {
+ .is_begin_event = exit_event_begin,
+ .is_end_event = exit_event_end,
+ .decode_key = exit_event_decode_key,
+ .name = "VM-EXIT"
+};
+
+struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+ { .name = "vmexit", .ops = &exit_events },
+ { .name = "hcall", .ops = &hcall_events },
+ { NULL, NULL },
+};
+
+const char * const kvm_skip_events[] = {
+ NULL,
+};
+
+
+static int is_tracepoint_available(const char *str, struct perf_evlist *evlist)
+{
+ struct parse_events_error err;
+ int ret;
+
+ err.str = NULL;
+ ret = parse_events(evlist, str, &err);
+ if (err.str)
+ pr_err("%s : %s\n", str, err.str);
+ return ret;
+}
+
+static int ppc__setup_book3s_hv(struct perf_kvm_stat *kvm,
+ struct perf_evlist *evlist)
+{
+ const char **events_ptr;
+ int i, nr_tp = 0, err = -1;
+
+ /* Check for book3s_hv tracepoints */
+ for (events_ptr = ppc_book3s_hv_kvm_tp; *events_ptr; events_ptr++) {
+ err = is_tracepoint_available(*events_ptr, evlist);
+ if (err)
+ return -1;
+ nr_tp++;
+ }
+
+ for (i = 0; i < nr_tp; i++)
+ kvm_events_tp[i] = ppc_book3s_hv_kvm_tp[i];
+
+ kvm_events_tp[i] = NULL;
+ kvm_exit_reason = "trap";
+ kvm->exit_reasons = hv_exit_reasons;
+ kvm->exit_reasons_isa = "HV";
+
+ return 0;
+}
+
+/* Wrapper to setup kvm tracepoints */
+static int ppc__setup_kvm_tp(struct perf_kvm_stat *kvm)
+{
+ struct perf_evlist *evlist = perf_evlist__new();
+
+ if (evlist == NULL)
+ return -ENOMEM;
+
+ /* Right now, only supported on book3s_hv */
+ return ppc__setup_book3s_hv(kvm, evlist);
+}
+
+int setup_kvm_events_tp(struct perf_kvm_stat *kvm)
+{
+ return ppc__setup_kvm_tp(kvm);
+}
+
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
+{
+ int ret;
+
+ ret = ppc__setup_kvm_tp(kvm);
+ if (ret) {
+ kvm->exit_reasons = NULL;
+ kvm->exit_reasons_isa = NULL;
+ }
+
+ return ret;
+}
diff --git a/tools/perf/arch/s390/util/kvm-stat.c b/tools/perf/arch/s390/util/kvm-stat.c
index a5dbc07ec9dc..ed57df2e6d68 100644
--- a/tools/perf/arch/s390/util/kvm-stat.c
+++ b/tools/perf/arch/s390/util/kvm-stat.c
@@ -10,7 +10,7 @@
*/
#include "../../util/kvm-stat.h"
-#include <asm/kvm_perf.h>
+#include <asm/sie.h>
define_exit_reasons_table(sie_exit_reasons, sie_intercept_code);
define_exit_reasons_table(sie_icpt_insn_codes, icpt_insn_codes);
@@ -18,6 +18,12 @@ define_exit_reasons_table(sie_sigp_order_codes, sigp_order_codes);
define_exit_reasons_table(sie_diagnose_codes, diagnose_codes);
define_exit_reasons_table(sie_icpt_prog_codes, icpt_prog_codes);
+const char *vcpu_id_str = "id";
+const int decode_str_len = 40;
+const char *kvm_exit_reason = "icptcode";
+const char *kvm_entry_trace = "kvm:kvm_s390_sie_enter";
+const char *kvm_exit_trace = "kvm:kvm_s390_sie_exit";
+
static void event_icpt_insn_get_key(struct perf_evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
@@ -73,7 +79,7 @@ static struct kvm_events_ops exit_events = {
.name = "VM-EXIT"
};
-const char * const kvm_events_tp[] = {
+const char *kvm_events_tp[] = {
"kvm:kvm_s390_sie_enter",
"kvm:kvm_s390_sie_exit",
"kvm:kvm_s390_intercept_instruction",
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index 09ba923debe8..269af2143735 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -3,3 +3,4 @@ PERF_HAVE_DWARF_REGS := 1
endif
HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
+PERF_HAVE_JITDUMP := 1
diff --git a/tools/perf/arch/x86/tests/rdpmc.c b/tools/perf/arch/x86/tests/rdpmc.c
index 7bb0d13c235f..72193f19d6d7 100644
--- a/tools/perf/arch/x86/tests/rdpmc.c
+++ b/tools/perf/arch/x86/tests/rdpmc.c
@@ -59,7 +59,7 @@ static u64 mmap_read_self(void *addr)
u64 quot, rem;
quot = (cyc >> time_shift);
- rem = cyc & ((1 << time_shift) - 1);
+ rem = cyc & (((u64)1 << time_shift) - 1);
delta = time_offset + quot * time_mult +
((rem * time_mult) >> time_shift);
@@ -103,6 +103,7 @@ static int __test__rdpmc(void)
sigfillset(&sa.sa_mask);
sa.sa_sigaction = segfault_handler;
+ sa.sa_flags = 0;
sigaction(SIGSEGV, &sa, NULL);
fd = sys_perf_event_open(&attr, 0, -1, -1,
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index 8d8150f1cf9b..d66f9ad4df2e 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -60,7 +60,9 @@ struct branch {
u64 misc;
};
-static size_t intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused)
+static size_t
+intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
return INTEL_BTS_AUXTRACE_PRIV_SIZE;
}
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index f05daacc9e78..a3395179c9ee 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -89,7 +89,7 @@ static int intel_pt_parse_terms_with_default(struct list_head *formats,
*config = attr.config;
out_free:
- parse_events__free_terms(terms);
+ parse_events_terms__delete(terms);
return err;
}
@@ -273,7 +273,9 @@ intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
return attr;
}
-static size_t intel_pt_info_priv_size(struct auxtrace_record *itr __maybe_unused)
+static size_t
+intel_pt_info_priv_size(struct auxtrace_record *itr __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
return INTEL_PT_AUXTRACE_PRIV_SIZE;
}
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index 14e4e668fad7..b63d4be655a2 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -1,5 +1,7 @@
#include "../../util/kvm-stat.h"
-#include <asm/kvm_perf.h>
+#include <asm/svm.h>
+#include <asm/vmx.h>
+#include <asm/kvm.h>
define_exit_reasons_table(vmx_exit_reasons, VMX_EXIT_REASONS);
define_exit_reasons_table(svm_exit_reasons, SVM_EXIT_REASONS);
@@ -11,6 +13,12 @@ static struct kvm_events_ops exit_events = {
.name = "VM-EXIT"
};
+const char *vcpu_id_str = "vcpu_id";
+const int decode_str_len = 20;
+const char *kvm_exit_reason = "exit_reason";
+const char *kvm_entry_trace = "kvm:kvm_entry";
+const char *kvm_exit_trace = "kvm:kvm_exit";
+
/*
* For the mmio events, we treat:
* the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
@@ -65,7 +73,7 @@ static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
struct event_key *key,
char *decode)
{
- scnprintf(decode, DECODE_STR_LEN, "%#lx:%s",
+ scnprintf(decode, decode_str_len, "%#lx:%s",
(unsigned long)key->key,
key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
}
@@ -109,7 +117,7 @@ static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
struct event_key *key,
char *decode)
{
- scnprintf(decode, DECODE_STR_LEN, "%#llx:%s",
+ scnprintf(decode, decode_str_len, "%#llx:%s",
(unsigned long long)key->key,
key->info ? "POUT" : "PIN");
}
@@ -121,7 +129,7 @@ static struct kvm_events_ops ioport_events = {
.name = "IO Port Access"
};
-const char * const kvm_events_tp[] = {
+const char *kvm_events_tp[] = {
"kvm:kvm_entry",
"kvm:kvm_exit",
"kvm:kvm_mmio",
diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm.S b/tools/perf/bench/mem-memcpy-x86-64-asm.S
index e4c2c30143b9..5c3cce082cb8 100644
--- a/tools/perf/bench/mem-memcpy-x86-64-asm.S
+++ b/tools/perf/bench/mem-memcpy-x86-64-asm.S
@@ -1,6 +1,11 @@
+
+/* Various wrappers to make the kernel .S file build in user-space: */
+
#define memcpy MEMCPY /* don't hide glibc's memcpy() */
#define altinstr_replacement text
#define globl p2align 4; .globl
+#define _ASM_EXTABLE_FAULT(x, y)
+
#include "../../../arch/x86/lib/memcpy_64.S"
/*
* We need to provide note.GNU-stack section, saying that we want
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index cc5c1267c738..cfe366375c4b 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -245,7 +245,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
hists__collapse_resort(hists, NULL);
/* Don't sort callchain */
perf_evsel__reset_sample_bit(pos, CALLCHAIN);
- hists__output_resort(hists, NULL);
+ perf_evsel__output_resort(pos, NULL);
if (symbol_conf.event_group &&
!perf_evsel__is_group_leader(pos))
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index d93bff7fc0e4..632efc6b79a0 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -38,19 +38,7 @@ static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid)
static int build_id_cache__kcore_dir(char *dir, size_t sz)
{
- struct timeval tv;
- struct tm tm;
- char dt[32];
-
- if (gettimeofday(&tv, NULL) || !localtime_r(&tv.tv_sec, &tm))
- return -1;
-
- if (!strftime(dt, sizeof(dt), "%Y%m%d%H%M%S", &tm))
- return -1;
-
- scnprintf(dir, sz, "%s%02u", dt, (unsigned)tv.tv_usec / 10000);
-
- return 0;
+ return fetch_current_timestamp(dir, sz);
}
static bool same_kallsyms_reloc(const char *from_dir, char *to_dir)
diff --git a/tools/perf/builtin-config.c b/tools/perf/builtin-config.c
index f04e804a9fad..c42448ed5dfe 100644
--- a/tools/perf/builtin-config.c
+++ b/tools/perf/builtin-config.c
@@ -13,8 +13,10 @@
#include "util/util.h"
#include "util/debug.h"
+static bool use_system_config, use_user_config;
+
static const char * const config_usage[] = {
- "perf config [options]",
+ "perf config [<file-option>] [options]",
NULL
};
@@ -25,6 +27,8 @@ enum actions {
static struct option config_options[] = {
OPT_SET_UINT('l', "list", &actions,
"show current config variables", ACTION_LIST),
+ OPT_BOOLEAN(0, "system", &use_system_config, "use system config file"),
+ OPT_BOOLEAN(0, "user", &use_user_config, "use user config file"),
OPT_END()
};
@@ -42,10 +46,23 @@ static int show_config(const char *key, const char *value,
int cmd_config(int argc, const char **argv, const char *prefix __maybe_unused)
{
int ret = 0;
+ char *user_config = mkpath("%s/.perfconfig", getenv("HOME"));
argc = parse_options(argc, argv, config_options, config_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
+ if (use_system_config && use_user_config) {
+ pr_err("Error: only one config file at a time\n");
+ parse_options_usage(config_usage, config_options, "user", 0);
+ parse_options_usage(NULL, config_options, "system", 0);
+ return -1;
+ }
+
+ if (use_system_config)
+ config_exclusive_filename = perf_etc_perfconfig();
+ else if (use_user_config)
+ config_exclusive_filename = user_config;
+
switch (actions) {
case ACTION_LIST:
if (argc) {
@@ -53,9 +70,13 @@ int cmd_config(int argc, const char **argv, const char *prefix __maybe_unused)
parse_options_usage(config_usage, config_options, "l", 1);
} else {
ret = perf_config(show_config, NULL);
- if (ret < 0)
+ if (ret < 0) {
+ const char *config_filename = config_exclusive_filename;
+ if (!config_exclusive_filename)
+ config_filename = user_config;
pr_err("Nothing configured, "
- "please check your ~/.perfconfig file\n");
+ "please check your %s \n", config_filename);
+ }
}
break;
default:
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 36ccc2b8827f..4d72359fd15a 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -1264,8 +1264,6 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
if (ret < 0)
return ret;
- perf_config(perf_default_config, NULL);
-
argc = parse_options(argc, argv, options, diff_usage, 0);
if (symbol__init(NULL) < 0)
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 96c1a4cfbbbf..49d55e21b1b0 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -86,8 +86,7 @@ static int check_emacsclient_version(void)
return -1;
}
- strbuf_remove(&buffer, 0, strlen("emacsclient"));
- version = atoi(buffer.buf);
+ version = atoi(buffer.buf + strlen("emacsclient"));
if (version < 22) {
fprintf(stderr,
@@ -273,7 +272,7 @@ static int perf_help_config(const char *var, const char *value, void *cb)
if (!prefixcmp(var, "man."))
return add_man_viewer_info(var, value);
- return perf_default_config(var, value, cb);
+ return 0;
}
static struct cmdnames main_cmds, other_cmds;
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 0022e02ed31a..7fa68663ed72 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -17,6 +17,7 @@
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
+#include "util/jit.h"
#include <subcmd/parse-options.h>
@@ -29,6 +30,7 @@ struct perf_inject {
bool sched_stat;
bool have_auxtrace;
bool strip;
+ bool jit_mode;
const char *input_name;
struct perf_data_file output;
u64 bytes_written;
@@ -71,6 +73,15 @@ static int perf_event__repipe_oe_synth(struct perf_tool *tool,
return perf_event__repipe_synth(tool, event);
}
+#ifdef HAVE_JITDUMP
+static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct ordered_events *oe __maybe_unused)
+{
+ return 0;
+}
+#endif
+
static int perf_event__repipe_op2_synth(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session
@@ -234,6 +245,31 @@ static int perf_event__repipe_mmap(struct perf_tool *tool,
return err;
}
+#ifdef HAVE_JITDUMP
+static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+ u64 n = 0;
+ int ret;
+
+ /*
+ * if jit marker, then inject jit mmaps and generate ELF images
+ */
+ ret = jit_process(inject->session, &inject->output, machine,
+ event->mmap.filename, sample->pid, &n);
+ if (ret < 0)
+ return ret;
+ if (ret) {
+ inject->bytes_written += n;
+ return 0;
+ }
+ return perf_event__repipe_mmap(tool, event, sample, machine);
+}
+#endif
+
static int perf_event__repipe_mmap2(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -247,6 +283,31 @@ static int perf_event__repipe_mmap2(struct perf_tool *tool,
return err;
}
+#ifdef HAVE_JITDUMP
+static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+ u64 n = 0;
+ int ret;
+
+ /*
+ * if jit marker, then inject jit mmaps and generate ELF images
+ */
+ ret = jit_process(inject->session, &inject->output, machine,
+ event->mmap2.filename, sample->pid, &n);
+ if (ret < 0)
+ return ret;
+ if (ret) {
+ inject->bytes_written += n;
+ return 0;
+ }
+ return perf_event__repipe_mmap2(tool, event, sample, machine);
+}
+#endif
+
static int perf_event__repipe_fork(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -626,12 +687,16 @@ static int __cmd_inject(struct perf_inject *inject)
ret = perf_session__process_events(session);
if (!file_out->is_pipe) {
- if (inject->build_ids) {
+ if (inject->build_ids)
perf_header__set_feat(&session->header,
HEADER_BUILD_ID);
- if (inject->have_auxtrace)
- dsos__hit_all(session);
- }
+ /*
+ * Keep all buildids when there is unprocessed AUX data because
+ * it is not known which ones the AUX trace hits.
+ */
+ if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
+ inject->have_auxtrace && !inject->itrace_synth_opts.set)
+ dsos__hit_all(session);
/*
* The AUX areas have been removed and replaced with
* synthesized hardware events, so clear the feature flag and
@@ -703,7 +768,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
};
int ret;
- const struct option options[] = {
+ struct option options[] = {
OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
"Inject build-ids into the output stream"),
OPT_STRING('i', "input", &inject.input_name, "file",
@@ -713,6 +778,9 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
"Merge sched-stat and sched-switch for getting events "
"where and how long tasks slept"),
+#ifdef HAVE_JITDUMP
+ OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
+#endif
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show build ids, etc)"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
@@ -729,7 +797,9 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
"perf inject [<options>]",
NULL
};
-
+#ifndef HAVE_JITDUMP
+ set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
+#endif
argc = parse_options(argc, argv, options, inject_usage, 0);
/*
@@ -755,6 +825,29 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
if (inject.session == NULL)
return -1;
+ if (inject.build_ids) {
+ /*
+ * to make sure the mmap records are ordered correctly, especially
+ * for jitted code mmaps. We cannot generate the buildid hit list
+ * and inject the jit mmaps at the same time for now.
+ */
+ inject.tool.ordered_events = true;
+ inject.tool.ordering_requires_timestamps = true;
+ }
+#ifdef HAVE_JITDUMP
+ if (inject.jit_mode) {
+ inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
+ inject.tool.mmap = perf_event__jit_repipe_mmap;
+ inject.tool.ordered_events = true;
+ inject.tool.ordering_requires_timestamps = true;
+ /*
+ * JIT MMAP injection injects all MMAP events in one go, so it
+ * does not obey finished_round semantics.
+ */
+ inject.tool.finished_round = perf_event__drop_oe;
+ }
+#endif
ret = symbol__init(&inject.session->header.env);
if (ret < 0)
goto out_delete;
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 118010553d0c..4d3340cce9a0 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -1834,7 +1834,7 @@ static int __cmd_record(int argc, const char **argv)
return cmd_record(i, rec_argv, NULL);
}
-static int kmem_config(const char *var, const char *value, void *cb)
+static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
{
if (!strcmp(var, "kmem.default")) {
if (!strcmp(value, "slab"))
@@ -1847,7 +1847,7 @@ static int kmem_config(const char *var, const char *value, void *cb)
return 0;
}
- return perf_default_config(var, value, cb);
+ return 0;
}
int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 4418d9214872..bff666458b28 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -30,7 +30,6 @@
#include <math.h>
#ifdef HAVE_KVM_STAT_SUPPORT
-#include <asm/kvm_perf.h>
#include "util/kvm-stat.h"
void exit_event_get_key(struct perf_evsel *evsel,
@@ -38,12 +37,12 @@ void exit_event_get_key(struct perf_evsel *evsel,
struct event_key *key)
{
key->info = 0;
- key->key = perf_evsel__intval(evsel, sample, KVM_EXIT_REASON);
+ key->key = perf_evsel__intval(evsel, sample, kvm_exit_reason);
}
bool kvm_exit_event(struct perf_evsel *evsel)
{
- return !strcmp(evsel->name, KVM_EXIT_TRACE);
+ return !strcmp(evsel->name, kvm_exit_trace);
}
bool exit_event_begin(struct perf_evsel *evsel,
@@ -59,7 +58,7 @@ bool exit_event_begin(struct perf_evsel *evsel,
bool kvm_entry_event(struct perf_evsel *evsel)
{
- return !strcmp(evsel->name, KVM_ENTRY_TRACE);
+ return !strcmp(evsel->name, kvm_entry_trace);
}
bool exit_event_end(struct perf_evsel *evsel,
@@ -91,7 +90,7 @@ void exit_event_decode_key(struct perf_kvm_stat *kvm,
const char *exit_reason = get_exit_reason(kvm, key->exit_reasons,
key->key);
- scnprintf(decode, DECODE_STR_LEN, "%s", exit_reason);
+ scnprintf(decode, decode_str_len, "%s", exit_reason);
}
static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
@@ -357,7 +356,7 @@ static bool handle_end_event(struct perf_kvm_stat *kvm,
time_diff = sample->time - time_begin;
if (kvm->duration && time_diff > kvm->duration) {
- char decode[DECODE_STR_LEN];
+ char decode[decode_str_len];
kvm->events_ops->decode_key(kvm, &event->key, decode);
if (!skip_event(decode)) {
@@ -385,7 +384,8 @@ struct vcpu_event_record *per_vcpu_record(struct thread *thread,
return NULL;
}
- vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, VCPU_ID);
+ vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample,
+ vcpu_id_str);
thread__set_priv(thread, vcpu_record);
}
@@ -574,7 +574,7 @@ static void show_timeofday(void)
static void print_result(struct perf_kvm_stat *kvm)
{
- char decode[DECODE_STR_LEN];
+ char decode[decode_str_len];
struct kvm_event *event;
int vcpu = kvm->trace_vcpu;
@@ -585,7 +585,7 @@ static void print_result(struct perf_kvm_stat *kvm)
pr_info("\n\n");
print_vcpu_info(kvm);
- pr_info("%*s ", DECODE_STR_LEN, kvm->events_ops->name);
+ pr_info("%*s ", decode_str_len, kvm->events_ops->name);
pr_info("%10s ", "Samples");
pr_info("%9s ", "Samples%");
@@ -604,7 +604,7 @@ static void print_result(struct perf_kvm_stat *kvm)
min = get_event_min(event, vcpu);
kvm->events_ops->decode_key(kvm, &event->key, decode);
- pr_info("%*s ", DECODE_STR_LEN, decode);
+ pr_info("%*s ", decode_str_len, decode);
pr_info("%10llu ", (unsigned long long)ecount);
pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
@@ -1132,6 +1132,11 @@ exit:
_p; \
})
+int __weak setup_kvm_events_tp(struct perf_kvm_stat *kvm __maybe_unused)
+{
+ return 0;
+}
+
static int
kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
@@ -1148,7 +1153,14 @@ kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
NULL
};
const char * const *events_tp;
+ int ret;
+
events_tp_size = 0;
+ ret = setup_kvm_events_tp(kvm);
+ if (ret < 0) {
+ pr_err("Unable to setup the kvm tracepoints\n");
+ return ret;
+ }
for (events_tp = kvm_events_tp; *events_tp; events_tp++)
events_tp_size++;
@@ -1377,6 +1389,12 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
/*
* generate the event list
*/
+ err = setup_kvm_events_tp(kvm);
+ if (err < 0) {
+ pr_err("Unable to setup the kvm tracepoints\n");
+ return err;
+ }
+
kvm->evlist = kvm_live_event_list();
if (kvm->evlist == NULL) {
err = -1;
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 390170041696..88aeac9aa1da 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -6,6 +6,8 @@
#include "util/tool.h"
#include "util/session.h"
#include "util/data.h"
+#include "util/mem-events.h"
+#include "util/debug.h"
#define MEM_OPERATION_LOAD 0x1
#define MEM_OPERATION_STORE 0x2
@@ -21,11 +23,56 @@ struct perf_mem {
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};
+static int parse_record_events(const struct option *opt,
+ const char *str, int unset __maybe_unused)
+{
+ struct perf_mem *mem = *(struct perf_mem **)opt->value;
+ int j;
+
+ if (strcmp(str, "list")) {
+ if (!perf_mem_events__parse(str)) {
+ mem->operation = 0;
+ return 0;
+ }
+ exit(-1);
+ }
+
+ for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
+ struct perf_mem_event *e = &perf_mem_events[j];
+
+ fprintf(stderr, "%-13s%-*s%s\n",
+ e->tag,
+ verbose ? 25 : 0,
+ verbose ? perf_mem_events__name(j) : "",
+ e->supported ? ": available" : "");
+ }
+ exit(0);
+}
+
+static const char * const __usage[] = {
+ "perf mem record [<options>] [<command>]",
+ "perf mem record [<options>] -- <command> [<options>]",
+ NULL
+};
+
+static const char * const *record_mem_usage = __usage;
+
static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
{
int rec_argc, i = 0, j;
const char **rec_argv;
int ret;
+ struct option options[] = {
+ OPT_CALLBACK('e', "event", &mem, "event",
+ "event selector. use 'perf mem record -e list' to list available events",
+ parse_record_events),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show counter open errors, etc)"),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, options, record_mem_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
rec_argc = argc + 7; /* max number of arguments */
rec_argv = calloc(rec_argc + 1, sizeof(char *));
@@ -35,23 +82,40 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
rec_argv[i++] = "record";
if (mem->operation & MEM_OPERATION_LOAD)
+ perf_mem_events[PERF_MEM_EVENTS__LOAD].record = true;
+
+ if (perf_mem_events[PERF_MEM_EVENTS__LOAD].record)
rec_argv[i++] = "-W";
rec_argv[i++] = "-d";
- if (mem->operation & MEM_OPERATION_LOAD) {
- rec_argv[i++] = "-e";
- rec_argv[i++] = "cpu/mem-loads/pp";
- }
+ for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
+ if (!perf_mem_events[j].record)
+ continue;
+
+ if (!perf_mem_events[j].supported) {
+ pr_err("failed: event '%s' not supported\n",
+ perf_mem_events__name(j));
+ return -1;
+ }
- if (mem->operation & MEM_OPERATION_STORE) {
rec_argv[i++] = "-e";
- rec_argv[i++] = "cpu/mem-stores/pp";
- }
+ rec_argv[i++] = perf_mem_events__name(j);
+ };
- for (j = 1; j < argc; j++, i++)
+ for (j = 0; j < argc; j++, i++)
rec_argv[i] = argv[j];
+ if (verbose > 0) {
+ pr_debug("calling: record ");
+
+ while (rec_argv[j]) {
+ pr_debug("%s ", rec_argv[j]);
+ j++;
+ }
+ pr_debug("\n");
+ }
+
ret = cmd_record(i, rec_argv, NULL);
free(rec_argv);
return ret;
@@ -298,6 +362,10 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
NULL
};
+ if (perf_mem_events__init()) {
+ pr_err("failed: memory events not supported\n");
+ return -1;
+ }
argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands,
mem_usage, PARSE_OPT_STOP_AT_NON_OPTION);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 319712a4e02b..515510ecc76a 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -32,6 +32,8 @@
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
+#include "util/bpf-loader.h"
+#include "asm/bug.h"
#include <unistd.h>
#include <sched.h>
@@ -49,7 +51,9 @@ struct record {
const char *progname;
int realtime_prio;
bool no_buildid;
+ bool no_buildid_set;
bool no_buildid_cache;
+ bool no_buildid_cache_set;
bool buildid_all;
unsigned long long samples;
};
@@ -320,7 +324,10 @@ try_again:
} else {
pr_err("failed to mmap with %d (%s)\n", errno,
strerror_r(errno, msg, sizeof(msg)));
- rc = -errno;
+ if (errno)
+ rc = -errno;
+ else
+ rc = -EINVAL;
}
goto out;
}
@@ -464,6 +471,29 @@ static void record__init_features(struct record *rec)
perf_header__clear_feat(&session->header, HEADER_STAT);
}
+static void
+record__finish_output(struct record *rec)
+{
+ struct perf_data_file *file = &rec->file;
+ int fd = perf_data_file__fd(file);
+
+ if (file->is_pipe)
+ return;
+
+ rec->session->header.data_size += rec->bytes_written;
+ file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
+
+ if (!rec->no_buildid) {
+ process_buildids(rec);
+
+ if (rec->buildid_all)
+ dsos__hit_all(rec->session);
+ }
+ perf_session__write_header(rec->session, rec->evlist, fd, true);
+
+ return;
+}
+
static volatile int workload_exec_errno;
/*
@@ -482,6 +512,74 @@ static void workload_exec_failed_signal(int signo __maybe_unused,
static void snapshot_sig_handler(int sig);
+static int record__synthesize(struct record *rec)
+{
+ struct perf_session *session = rec->session;
+ struct machine *machine = &session->machines.host;
+ struct perf_data_file *file = &rec->file;
+ struct record_opts *opts = &rec->opts;
+ struct perf_tool *tool = &rec->tool;
+ int fd = perf_data_file__fd(file);
+ int err = 0;
+
+ if (file->is_pipe) {
+ err = perf_event__synthesize_attrs(tool, session,
+ process_synthesized_event);
+ if (err < 0) {
+ pr_err("Couldn't synthesize attrs.\n");
+ goto out;
+ }
+
+ if (have_tracepoints(&rec->evlist->entries)) {
+ /*
+ * FIXME err <= 0 here actually means that
+ * there were no tracepoints so it's not really
+ * an error, just that we don't need to
+ * synthesize anything. We really have to
+ * return this more properly and also
+ * propagate errors that now are calling die()
+ */
+ err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
+ process_synthesized_event);
+ if (err <= 0) {
+ pr_err("Couldn't record tracing data.\n");
+ goto out;
+ }
+ rec->bytes_written += err;
+ }
+ }
+
+ if (rec->opts.full_auxtrace) {
+ err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
+ session, process_synthesized_event);
+ if (err)
+ goto out;
+ }
+
+ err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
+ machine);
+ WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
+ "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+ "Check /proc/kallsyms permission or run as root.\n");
+
+ err = perf_event__synthesize_modules(tool, process_synthesized_event,
+ machine);
+ WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
+ "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+ "Check /proc/modules permission or run as root.\n");
+
+ if (perf_guest) {
+ machines__process_guests(&session->machines,
+ perf_event__synthesize_guest_os, tool);
+ }
+
+ err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
+ process_synthesized_event, opts->sample_address,
+ opts->proc_map_timeout);
+out:
+ return err;
+}
+
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
int err;
@@ -534,6 +632,16 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
goto out_child;
}
+ err = bpf__apply_obj_config();
+ if (err) {
+ char errbuf[BUFSIZ];
+
+ bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
+ pr_err("ERROR: Apply config to BPF failed: %s\n",
+ errbuf);
+ goto out_child;
+ }
+
/*
* Normally perf_session__new would do this, but it doesn't have the
* evlist.
@@ -566,63 +674,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
machine = &session->machines.host;
- if (file->is_pipe) {
- err = perf_event__synthesize_attrs(tool, session,
- process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize attrs.\n");
- goto out_child;
- }
-
- if (have_tracepoints(&rec->evlist->entries)) {
- /*
- * FIXME err <= 0 here actually means that
- * there were no tracepoints so its not really
- * an error, just that we don't need to
- * synthesize anything. We really have to
- * return this more properly and also
- * propagate errors that now are calling die()
- */
- err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
- process_synthesized_event);
- if (err <= 0) {
- pr_err("Couldn't record tracing data.\n");
- goto out_child;
- }
- rec->bytes_written += err;
- }
- }
-
- if (rec->opts.full_auxtrace) {
- err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
- session, process_synthesized_event);
- if (err)
- goto out_delete_session;
- }
-
- err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
- machine);
- if (err < 0)
- pr_err("Couldn't record kernel reference relocation symbol\n"
- "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
- "Check /proc/kallsyms permission or run as root.\n");
-
- err = perf_event__synthesize_modules(tool, process_synthesized_event,
- machine);
+ err = record__synthesize(rec);
if (err < 0)
- pr_err("Couldn't record kernel module information.\n"
- "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
- "Check /proc/modules permission or run as root.\n");
-
- if (perf_guest) {
- machines__process_guests(&session->machines,
- perf_event__synthesize_guest_os, tool);
- }
-
- err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
- process_synthesized_event, opts->sample_address,
- opts->proc_map_timeout);
- if (err != 0)
goto out_child;
if (rec->realtime_prio) {
@@ -758,18 +811,8 @@ out_child:
/* this will be recalculated during process_buildids() */
rec->samples = 0;
- if (!err && !file->is_pipe) {
- rec->session->header.data_size += rec->bytes_written;
- file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
-
- if (!rec->no_buildid) {
- process_buildids(rec);
-
- if (rec->buildid_all)
- dsos__hit_all(rec->session);
- }
- perf_session__write_header(rec->session, rec->evlist, fd, true);
- }
+ if (!err)
+ record__finish_output(rec);
if (!err && !quiet) {
char samples[128];
@@ -1097,10 +1140,12 @@ struct option __record_options[] = {
OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
"don't sample"),
- OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
- "do not update the buildid cache"),
- OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
- "do not collect buildids in perf.data"),
+ OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
+ &record.no_buildid_cache_set,
+ "do not update the buildid cache"),
+ OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
+ &record.no_buildid_set,
+ "do not collect buildids in perf.data"),
OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
"monitor event in cgroup name only",
parse_cgroups),
@@ -1136,6 +1181,12 @@ struct option __record_options[] = {
"per thread proc mmap processing timeout in ms"),
OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
"Record context switch events"),
+ OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
+ "Configure all used events to run in kernel space.",
+ PARSE_OPT_EXCLUSIVE),
+ OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
+ "Configure all used events to run in user space.",
+ PARSE_OPT_EXCLUSIVE),
OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
"clang binary to use for compiling BPF scriptlets"),
OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 2bf537f190a0..7eea49f9ed46 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -75,7 +75,10 @@ static int report__config(const char *var, const char *value, void *cb)
return 0;
}
if (!strcmp(var, "report.percent-limit")) {
- rep->min_percent = strtof(value, NULL);
+ double pcnt = strtof(value, NULL);
+
+ rep->min_percent = pcnt;
+ callchain_param.min_percent = pcnt;
return 0;
}
if (!strcmp(var, "report.children")) {
@@ -87,7 +90,7 @@ static int report__config(const char *var, const char *value, void *cb)
return 0;
}
- return perf_default_config(var, value, cb);
+ return 0;
}
static int hist_iter__report_callback(struct hist_entry_iter *iter,
@@ -466,10 +469,11 @@ static int report__browse_hists(struct report *rep)
return ret;
}
-static void report__collapse_hists(struct report *rep)
+static int report__collapse_hists(struct report *rep)
{
struct ui_progress prog;
struct perf_evsel *pos;
+ int ret = 0;
ui_progress__init(&prog, rep->nr_entries, "Merging related events...");
@@ -481,7 +485,9 @@ static void report__collapse_hists(struct report *rep)
hists->socket_filter = rep->socket_filter;
- hists__collapse_resort(hists, &prog);
+ ret = hists__collapse_resort(hists, &prog);
+ if (ret < 0)
+ break;
/* Non-group events are considered as leader */
if (symbol_conf.event_group &&
@@ -494,6 +500,7 @@ static void report__collapse_hists(struct report *rep)
}
ui_progress__finish();
+ return ret;
}
static void report__output_resort(struct report *rep)
@@ -504,7 +511,7 @@ static void report__output_resort(struct report *rep)
ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
evlist__for_each(rep->session->evlist, pos)
- hists__output_resort(evsel__hists(pos), &prog);
+ perf_evsel__output_resort(pos, &prog);
ui_progress__finish();
}
@@ -561,7 +568,11 @@ static int __cmd_report(struct report *rep)
}
}
- report__collapse_hists(rep);
+ ret = report__collapse_hists(rep);
+ if (ret) {
+ ui__error("failed to process hist entry\n");
+ return ret;
+ }
if (session_done())
return 0;
@@ -633,8 +644,10 @@ parse_percent_limit(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct report *rep = opt->value;
+ double pcnt = strtof(str, NULL);
- rep->min_percent = strtof(str, NULL);
+ rep->min_percent = pcnt;
+ callchain_param.min_percent = pcnt;
return 0;
}
@@ -798,6 +811,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"only show processor socket that match with this filter"),
OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
"Show raw trace event output (do not use print fmt or plugins)"),
+ OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
+ "Show entries in a hierarchy"),
OPT_END()
};
struct perf_data_file file = {
@@ -907,13 +922,19 @@ repeat:
symbol_conf.cumulate_callchain = false;
}
- if (setup_sorting(session->evlist) < 0) {
- if (sort_order)
- parse_options_usage(report_usage, options, "s", 1);
- if (field_order)
- parse_options_usage(sort_order ? NULL : report_usage,
- options, "F", 1);
- goto error;
+ if (symbol_conf.report_hierarchy) {
+ /* disable incompatible options */
+ symbol_conf.event_group = false;
+ symbol_conf.cumulate_callchain = false;
+
+ if (field_order) {
+ pr_err("Error: --hierarchy and --fields options cannot be used together\n");
+ parse_options_usage(report_usage, options, "F", 1);
+ parse_options_usage(NULL, options, "hierarchy", 0);
+ goto error;
+ }
+
+ sort__need_collapse = true;
}
/* Force tty output for header output and per-thread stat. */
@@ -925,6 +946,15 @@ repeat:
else
use_browser = 0;
+ if (setup_sorting(session->evlist) < 0) {
+ if (sort_order)
+ parse_options_usage(report_usage, options, "s", 1);
+ if (field_order)
+ parse_options_usage(sort_order ? NULL : report_usage,
+ options, "F", 1);
+ goto error;
+ }
+
if (report.header || report.header_only) {
perf_session__fprintf_info(session, stdout,
report.show_full_info);
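report__collapse_hists() now propagates the first failing hists__collapse_resort() back to __cmd_report() instead of returning void. A minimal sketch of that error-propagation shape, with hypothetical stand-in types:

#include <stdio.h>

struct evsel { int bad; };	/* stand-in for struct perf_evsel */

static int collapse_one(struct evsel *e)	/* hypothetical per-event step */
{
	return e->bad ? -1 : 0;
}

static int collapse_all(struct evsel *evsels, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = collapse_one(&evsels[i]);
		if (ret < 0)
			break;		/* first failure aborts, as above */
	}
	return ret;			/* 0 on success, <0 on first error */
}

int main(void)
{
	struct evsel ev[2] = { {0}, {1} };

	if (collapse_all(ev, 2))
		fprintf(stderr, "failed to process hist entry\n");
	return 0;
}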
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index c691214d820f..57f9a7e7f7d3 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -23,6 +23,7 @@
#include "util/stat.h"
#include <linux/bitmap.h>
#include "asm/bug.h"
+#include "util/mem-events.h"
static char const *script_name;
static char const *generate_script_lang;
@@ -58,6 +59,9 @@ enum perf_output_field {
PERF_OUTPUT_IREGS = 1U << 14,
PERF_OUTPUT_BRSTACK = 1U << 15,
PERF_OUTPUT_BRSTACKSYM = 1U << 16,
+ PERF_OUTPUT_DATA_SRC = 1U << 17,
+ PERF_OUTPUT_WEIGHT = 1U << 18,
+ PERF_OUTPUT_BPF_OUTPUT = 1U << 19,
};
struct output_option {
@@ -81,6 +85,9 @@ struct output_option {
{.str = "iregs", .field = PERF_OUTPUT_IREGS},
{.str = "brstack", .field = PERF_OUTPUT_BRSTACK},
{.str = "brstacksym", .field = PERF_OUTPUT_BRSTACKSYM},
+ {.str = "data_src", .field = PERF_OUTPUT_DATA_SRC},
+ {.str = "weight", .field = PERF_OUTPUT_WEIGHT},
+ {.str = "bpf-output", .field = PERF_OUTPUT_BPF_OUTPUT},
};
/* default set to maintain compatibility with current format */
@@ -101,7 +108,7 @@ static struct {
PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
PERF_OUTPUT_PERIOD,
- .invalid_fields = PERF_OUTPUT_TRACE,
+ .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
[PERF_TYPE_SOFTWARE] = {
@@ -111,7 +118,7 @@ static struct {
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
- PERF_OUTPUT_PERIOD,
+ PERF_OUTPUT_PERIOD | PERF_OUTPUT_BPF_OUTPUT,
.invalid_fields = PERF_OUTPUT_TRACE,
},
@@ -121,7 +128,7 @@ static struct {
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
- PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE,
+ PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE
},
[PERF_TYPE_RAW] = {
@@ -131,9 +138,10 @@ static struct {
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
- PERF_OUTPUT_PERIOD,
+ PERF_OUTPUT_PERIOD | PERF_OUTPUT_ADDR |
+ PERF_OUTPUT_DATA_SRC | PERF_OUTPUT_WEIGHT,
- .invalid_fields = PERF_OUTPUT_TRACE,
+ .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
[PERF_TYPE_BREAKPOINT] = {
@@ -145,7 +153,7 @@ static struct {
PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
PERF_OUTPUT_PERIOD,
- .invalid_fields = PERF_OUTPUT_TRACE,
+ .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
};
@@ -242,6 +250,16 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
PERF_OUTPUT_ADDR, allow_user_set))
return -EINVAL;
+ if (PRINT_FIELD(DATA_SRC) &&
+ perf_evsel__check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC",
+ PERF_OUTPUT_DATA_SRC))
+ return -EINVAL;
+
+ if (PRINT_FIELD(WEIGHT) &&
+ perf_evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT, "WEIGHT",
+ PERF_OUTPUT_WEIGHT))
+ return -EINVAL;
+
if (PRINT_FIELD(SYM) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) {
pr_err("Display of symbols requested but neither sample IP nor "
"sample address\nis selected. Hence, no addresses to convert "
@@ -608,6 +626,84 @@ static void print_sample_flags(u32 flags)
printf(" %-4s ", str);
}
+struct printer_data {
+ int line_no;
+ bool hit_nul;
+ bool is_printable;
+};
+
+static void
+print_sample_bpf_output_printer(enum binary_printer_ops op,
+ unsigned int val,
+ void *extra)
+{
+ unsigned char ch = (unsigned char)val;
+ struct printer_data *printer_data = extra;
+
+ switch (op) {
+ case BINARY_PRINT_DATA_BEGIN:
+ printf("\n");
+ break;
+ case BINARY_PRINT_LINE_BEGIN:
+ printf("%17s", !printer_data->line_no ? "BPF output:" :
+ " ");
+ break;
+ case BINARY_PRINT_ADDR:
+ printf(" %04x:", val);
+ break;
+ case BINARY_PRINT_NUM_DATA:
+ printf(" %02x", val);
+ break;
+ case BINARY_PRINT_NUM_PAD:
+ printf(" ");
+ break;
+ case BINARY_PRINT_SEP:
+ printf(" ");
+ break;
+ case BINARY_PRINT_CHAR_DATA:
+ if (printer_data->hit_nul && ch)
+ printer_data->is_printable = false;
+
+ if (!isprint(ch)) {
+ printf("%c", '.');
+
+ if (!printer_data->is_printable)
+ break;
+
+ if (ch == '\0')
+ printer_data->hit_nul = true;
+ else
+ printer_data->is_printable = false;
+ } else {
+ printf("%c", ch);
+ }
+ break;
+ case BINARY_PRINT_CHAR_PAD:
+ printf(" ");
+ break;
+ case BINARY_PRINT_LINE_END:
+ printf("\n");
+ printer_data->line_no++;
+ break;
+ case BINARY_PRINT_DATA_END:
+ default:
+ break;
+ }
+}
+
+static void print_sample_bpf_output(struct perf_sample *sample)
+{
+ unsigned int nr_bytes = sample->raw_size;
+ struct printer_data printer_data = {0, false, true};
+
+ print_binary(sample->raw_data, nr_bytes, 8,
+ print_sample_bpf_output_printer, &printer_data);
+
+ if (printer_data.is_printable && printer_data.hit_nul)
+ printf("%17s \"%s\"\n", "BPF string:",
+ (char *)(sample->raw_data));
+}
+
struct perf_script {
struct perf_tool tool;
struct perf_session *session;
@@ -634,6 +730,23 @@ static int perf_evlist__max_name_len(struct perf_evlist *evlist)
return max;
}
+static size_t data_src__printf(u64 data_src)
+{
+ struct mem_info mi = { .data_src.val = data_src };
+ char decode[100];
+ char out[100];
+ static int maxlen;
+ int len;
+
+ perf_script__meminfo_scnprintf(decode, 100, &mi);
+
+ len = scnprintf(out, 100, "%16" PRIx64 " %s", data_src, decode);
+ if (maxlen < len)
+ maxlen = len;
+
+ return printf("%-*s", maxlen, out);
+}
+
static void process_event(struct perf_script *script, union perf_event *event,
struct perf_sample *sample, struct perf_evsel *evsel,
struct addr_location *al)
@@ -673,6 +786,12 @@ static void process_event(struct perf_script *script, union perf_event *event,
if (PRINT_FIELD(ADDR))
print_sample_addr(event, sample, thread, attr);
+ if (PRINT_FIELD(DATA_SRC))
+ data_src__printf(sample->data_src);
+
+ if (PRINT_FIELD(WEIGHT))
+ printf("%16" PRIu64, sample->weight);
+
if (PRINT_FIELD(IP)) {
if (!symbol_conf.use_callchain)
printf(" ");
@@ -692,6 +811,9 @@ static void process_event(struct perf_script *script, union perf_event *event,
else if (PRINT_FIELD(BRSTACKSYM))
print_sample_brstacksym(event, sample, thread, attr);
+ if (perf_evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
+ print_sample_bpf_output(sample);
+
printf("\n");
}
@@ -1090,23 +1212,6 @@ static struct script_spec *script_spec__find(const char *spec)
return NULL;
}
-static struct script_spec *script_spec__findnew(const char *spec,
- struct scripting_ops *ops)
-{
- struct script_spec *s = script_spec__find(spec);
-
- if (s)
- return s;
-
- s = script_spec__new(spec, ops);
- if (!s)
- return NULL;
-
- script_spec__add(s);
-
- return s;
-}
-
int script_spec_register(const char *spec, struct scripting_ops *ops)
{
struct script_spec *s;
@@ -1115,9 +1220,11 @@ int script_spec_register(const char *spec, struct scripting_ops *ops)
if (s)
return -1;
- s = script_spec__findnew(spec, ops);
+ s = script_spec__new(spec, ops);
if (!s)
return -1;
+ else
+ script_spec__add(s);
return 0;
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 038e877081b6..1f19f2f999c8 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -122,6 +122,7 @@ static bool sync_run = false;
static unsigned int initial_delay = 0;
static unsigned int unit_width = 4; /* strlen("unit") */
static bool forever = false;
+static bool metric_only = false;
static struct timespec ref_time;
static struct cpu_map *aggr_map;
static aggr_get_id_t aggr_get_id;
@@ -735,6 +736,191 @@ static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
}
}
+struct outstate {
+ FILE *fh;
+ bool newline;
+ const char *prefix;
+ int nfields;
+ int id, nr;
+ struct perf_evsel *evsel;
+};
+
+#define METRIC_LEN 35
+
+static void new_line_std(void *ctx)
+{
+ struct outstate *os = ctx;
+
+ os->newline = true;
+}
+
+static void do_new_line_std(struct outstate *os)
+{
+ fputc('\n', os->fh);
+ fputs(os->prefix, os->fh);
+ aggr_printout(os->evsel, os->id, os->nr);
+ if (stat_config.aggr_mode == AGGR_NONE)
+ fprintf(os->fh, " ");
+ fprintf(os->fh, " ");
+}
+
+static void print_metric_std(void *ctx, const char *color, const char *fmt,
+ const char *unit, double val)
+{
+ struct outstate *os = ctx;
+ FILE *out = os->fh;
+ int n;
+ bool newline = os->newline;
+
+ os->newline = false;
+
+ if (unit == NULL || fmt == NULL) {
+ fprintf(out, "%-*s", METRIC_LEN, "");
+ return;
+ }
+
+ if (newline)
+ do_new_line_std(os);
+
+ n = fprintf(out, " # ");
+ if (color)
+ n += color_fprintf(out, color, fmt, val);
+ else
+ n += fprintf(out, fmt, val);
+ fprintf(out, " %-*s", METRIC_LEN - n - 1, unit);
+}
+
+static void new_line_csv(void *ctx)
+{
+ struct outstate *os = ctx;
+ int i;
+
+ fputc('\n', os->fh);
+ if (os->prefix)
+ fprintf(os->fh, "%s%s", os->prefix, csv_sep);
+ aggr_printout(os->evsel, os->id, os->nr);
+ for (i = 0; i < os->nfields; i++)
+ fputs(csv_sep, os->fh);
+}
+
+static void print_metric_csv(void *ctx,
+ const char *color __maybe_unused,
+ const char *fmt, const char *unit, double val)
+{
+ struct outstate *os = ctx;
+ FILE *out = os->fh;
+ char buf[64], *vals, *ends;
+
+ if (unit == NULL || fmt == NULL) {
+ fprintf(out, "%s%s%s%s", csv_sep, csv_sep, csv_sep, csv_sep);
+ return;
+ }
+ snprintf(buf, sizeof(buf), fmt, val);
+ vals = buf;
+ while (isspace(*vals))
+ vals++;
+ ends = vals;
+ while (isdigit(*ends) || *ends == '.')
+ ends++;
+ *ends = 0;
+ while (isspace(*unit))
+ unit++;
+ fprintf(out, "%s%s%s%s", csv_sep, vals, csv_sep, unit);
+}
+
+#define METRIC_ONLY_LEN 20
+
+/* Filter out some columns that don't work well in metric-only mode */
+
+static bool valid_only_metric(const char *unit)
+{
+ if (!unit)
+ return false;
+ if (strstr(unit, "/sec") ||
+ strstr(unit, "hz") ||
+ strstr(unit, "Hz") ||
+ strstr(unit, "CPUs utilized"))
+ return false;
+ return true;
+}
+
+static const char *fixunit(char *buf, struct perf_evsel *evsel,
+ const char *unit)
+{
+ if (!strncmp(unit, "of all", 6)) {
+ snprintf(buf, 1024, "%s %s", perf_evsel__name(evsel),
+ unit);
+ return buf;
+ }
+ return unit;
+}
+
+static void print_metric_only(void *ctx, const char *color, const char *fmt,
+ const char *unit, double val)
+{
+ struct outstate *os = ctx;
+ FILE *out = os->fh;
+ int n;
+ char buf[1024];
+ unsigned mlen = METRIC_ONLY_LEN;
+
+ if (!valid_only_metric(unit))
+ return;
+ unit = fixunit(buf, os->evsel, unit);
+ if (color)
+ n = color_fprintf(out, color, fmt, val);
+ else
+ n = fprintf(out, fmt, val);
+ if (n > METRIC_ONLY_LEN)
+ n = METRIC_ONLY_LEN;
+ if (mlen < strlen(unit))
+ mlen = strlen(unit) + 1;
+ fprintf(out, "%*s", mlen - n, "");
+}
+
+static void print_metric_only_csv(void *ctx, const char *color __maybe_unused,
+ const char *fmt,
+ const char *unit, double val)
+{
+ struct outstate *os = ctx;
+ FILE *out = os->fh;
+ char buf[64], *vals, *ends;
+ char tbuf[1024];
+
+ if (!valid_only_metric(unit))
+ return;
+ unit = fixunit(tbuf, os->evsel, unit);
+ snprintf(buf, sizeof buf, fmt, val);
+ vals = buf;
+ while (isspace(*vals))
+ vals++;
+ ends = vals;
+ while (isdigit(*ends) || *ends == '.')
+ ends++;
+ *ends = 0;
+ fprintf(out, "%s%s", vals, csv_sep);
+}
+
+static void new_line_metric(void *ctx __maybe_unused)
+{
+}
+
+static void print_metric_header(void *ctx, const char *color __maybe_unused,
+ const char *fmt __maybe_unused,
+ const char *unit, double val __maybe_unused)
+{
+ struct outstate *os = ctx;
+ char tbuf[1024];
+
+ if (!valid_only_metric(unit))
+ return;
+ unit = fixunit(tbuf, os->evsel, unit);
+ if (csv_output)
+ fprintf(os->fh, "%s%s", unit, csv_sep);
+ else
+ fprintf(os->fh, "%-*s ", METRIC_ONLY_LEN, unit);
+}
+
static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
{
FILE *output = stat_config.output;
@@ -763,6 +949,28 @@ static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
}
+static int first_shadow_cpu(struct perf_evsel *evsel, int id)
+{
+ int i;
+
+ if (!aggr_get_id)
+ return 0;
+
+ if (stat_config.aggr_mode == AGGR_NONE)
+ return id;
+
+ if (stat_config.aggr_mode == AGGR_GLOBAL)
+ return 0;
+
+ for (i = 0; i < perf_evsel__nr_cpus(evsel); i++) {
+ int cpu2 = perf_evsel__cpus(evsel)->map[i];
+
+ if (aggr_get_id(evsel_list->cpus, cpu2) == id)
+ return cpu2;
+ }
+ return 0;
+}
+
static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
{
FILE *output = stat_config.output;
@@ -793,22 +1001,124 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
}
-static void printout(int id, int nr, struct perf_evsel *counter, double uval)
+static void printout(int id, int nr, struct perf_evsel *counter, double uval,
+ char *prefix, u64 run, u64 ena, double noise)
{
- int cpu = cpu_map__id_to_cpu(id);
+ struct perf_stat_output_ctx out;
+ struct outstate os = {
+ .fh = stat_config.output,
+ .prefix = prefix ? prefix : "",
+ .id = id,
+ .nr = nr,
+ .evsel = counter,
+ };
+ print_metric_t pm = print_metric_std;
+ void (*nl)(void *);
- if (stat_config.aggr_mode == AGGR_GLOBAL)
- cpu = 0;
+ if (metric_only) {
+ nl = new_line_metric;
+ if (csv_output)
+ pm = print_metric_only_csv;
+ else
+ pm = print_metric_only;
+ } else
+ nl = new_line_std;
+
+ if (csv_output && !metric_only) {
+ static int aggr_fields[] = {
+ [AGGR_GLOBAL] = 0,
+ [AGGR_THREAD] = 1,
+ [AGGR_NONE] = 1,
+ [AGGR_SOCKET] = 2,
+ [AGGR_CORE] = 2,
+ };
+
+ pm = print_metric_csv;
+ nl = new_line_csv;
+ os.nfields = 3;
+ os.nfields += aggr_fields[stat_config.aggr_mode];
+ if (counter->cgrp)
+ os.nfields++;
+ }
+ if (run == 0 || ena == 0 || counter->counts->scaled == -1) {
+ if (metric_only) {
+ pm(&os, NULL, "", "", 0);
+ return;
+ }
+ aggr_printout(counter, id, nr);
+
+ fprintf(stat_config.output, "%*s%s",
+ csv_output ? 0 : 18,
+ counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
+ csv_sep);
+
+ fprintf(stat_config.output, "%-*s%s",
+ csv_output ? 0 : unit_width,
+ counter->unit, csv_sep);
+
+ fprintf(stat_config.output, "%*s",
+ csv_output ? 0 : -25,
+ perf_evsel__name(counter));
+
+ if (counter->cgrp)
+ fprintf(stat_config.output, "%s%s",
+ csv_sep, counter->cgrp->name);
- if (nsec_counter(counter))
+ if (!csv_output)
+ pm(&os, NULL, NULL, "", 0);
+ print_noise(counter, noise);
+ print_running(run, ena);
+ if (csv_output)
+ pm(&os, NULL, NULL, "", 0);
+ return;
+ }
+
+ if (metric_only)
+ /* nothing */;
+ else if (nsec_counter(counter))
nsec_printout(id, nr, counter, uval);
else
abs_printout(id, nr, counter, uval);
- if (!csv_output && !stat_config.interval)
- perf_stat__print_shadow_stats(stat_config.output, counter,
- uval, cpu,
- stat_config.aggr_mode);
+ out.print_metric = pm;
+ out.new_line = nl;
+ out.ctx = &os;
+
+ if (csv_output && !metric_only) {
+ print_noise(counter, noise);
+ print_running(run, ena);
+ }
+
+ perf_stat__print_shadow_stats(counter, uval,
+ first_shadow_cpu(counter, id),
+ &out);
+ if (!csv_output && !metric_only) {
+ print_noise(counter, noise);
+ print_running(run, ena);
+ }
+}
+
+static void aggr_update_shadow(void)
+{
+ int cpu, s2, id, s;
+ u64 val;
+ struct perf_evsel *counter;
+
+ for (s = 0; s < aggr_map->nr; s++) {
+ id = aggr_map->map[s];
+ evlist__for_each(evsel_list, counter) {
+ val = 0;
+ for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
+ s2 = aggr_get_id(evsel_list->cpus, cpu);
+ if (s2 != id)
+ continue;
+ val += perf_counts(counter->counts, cpu, 0)->val;
+ }
+ val = val * counter->scale;
+ perf_stat__update_shadow_stats(counter, &val,
+ first_shadow_cpu(counter, id));
+ }
+ }
}
static void print_aggr(char *prefix)
@@ -818,12 +1128,23 @@ static void print_aggr(char *prefix)
int cpu, s, s2, id, nr;
double uval;
u64 ena, run, val;
+ bool first;
if (!(aggr_map || aggr_get_id))
return;
+ aggr_update_shadow();
+
+ /*
+ * With metric_only everything is on a single line.
+	 * Without it, each counter has its own line.
+ */
for (s = 0; s < aggr_map->nr; s++) {
+ if (prefix && metric_only)
+ fprintf(output, "%s", prefix);
+
id = aggr_map->map[s];
+ first = true;
evlist__for_each(evsel_list, counter) {
val = ena = run = 0;
nr = 0;
@@ -836,41 +1157,20 @@ static void print_aggr(char *prefix)
run += perf_counts(counter->counts, cpu, 0)->run;
nr++;
}
- if (prefix)
- fprintf(output, "%s", prefix);
-
- if (run == 0 || ena == 0) {
+ if (first && metric_only) {
+ first = false;
aggr_printout(counter, id, nr);
-
- fprintf(output, "%*s%s",
- csv_output ? 0 : 18,
- counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
- csv_sep);
-
- fprintf(output, "%-*s%s",
- csv_output ? 0 : unit_width,
- counter->unit, csv_sep);
-
- fprintf(output, "%*s",
- csv_output ? 0 : -25,
- perf_evsel__name(counter));
-
- if (counter->cgrp)
- fprintf(output, "%s%s",
- csv_sep, counter->cgrp->name);
-
- print_running(run, ena);
- fputc('\n', output);
- continue;
}
- uval = val * counter->scale;
- printout(id, nr, counter, uval);
- if (!csv_output)
- print_noise(counter, 1.0);
+ if (prefix && !metric_only)
+ fprintf(output, "%s", prefix);
- print_running(run, ena);
- fputc('\n', output);
+ uval = val * counter->scale;
+ printout(id, nr, counter, uval, prefix, run, ena, 1.0);
+ if (!metric_only)
+ fputc('\n', output);
}
+ if (metric_only)
+ fputc('\n', output);
}
}
@@ -895,12 +1195,7 @@ static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
fprintf(output, "%s", prefix);
uval = val * counter->scale;
- printout(thread, 0, counter, uval);
-
- if (!csv_output)
- print_noise(counter, 1.0);
-
- print_running(run, ena);
+ printout(thread, 0, counter, uval, prefix, run, ena, 1.0);
fputc('\n', output);
}
}
@@ -914,43 +1209,19 @@ static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
FILE *output = stat_config.output;
struct perf_stat_evsel *ps = counter->priv;
double avg = avg_stats(&ps->res_stats[0]);
- int scaled = counter->counts->scaled;
double uval;
double avg_enabled, avg_running;
avg_enabled = avg_stats(&ps->res_stats[1]);
avg_running = avg_stats(&ps->res_stats[2]);
- if (prefix)
+ if (prefix && !metric_only)
fprintf(output, "%s", prefix);
- if (scaled == -1 || !counter->supported) {
- fprintf(output, "%*s%s",
- csv_output ? 0 : 18,
- counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
- csv_sep);
- fprintf(output, "%-*s%s",
- csv_output ? 0 : unit_width,
- counter->unit, csv_sep);
- fprintf(output, "%*s",
- csv_output ? 0 : -25,
- perf_evsel__name(counter));
-
- if (counter->cgrp)
- fprintf(output, "%s%s", csv_sep, counter->cgrp->name);
-
- print_running(avg_running, avg_enabled);
- fputc('\n', output);
- return;
- }
-
uval = avg * counter->scale;
- printout(-1, 0, counter, uval);
-
- print_noise(counter, avg);
-
- print_running(avg_running, avg_enabled);
- fprintf(output, "\n");
+ printout(-1, 0, counter, uval, prefix, avg_running, avg_enabled, avg);
+ if (!metric_only)
+ fprintf(output, "\n");
}
/*
@@ -972,39 +1243,78 @@ static void print_counter(struct perf_evsel *counter, char *prefix)
if (prefix)
fprintf(output, "%s", prefix);
- if (run == 0 || ena == 0) {
- fprintf(output, "CPU%*d%s%*s%s",
- csv_output ? 0 : -4,
- perf_evsel__cpus(counter)->map[cpu], csv_sep,
- csv_output ? 0 : 18,
- counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
- csv_sep);
+ uval = val * counter->scale;
+ printout(cpu, 0, counter, uval, prefix, run, ena, 1.0);
- fprintf(output, "%-*s%s",
- csv_output ? 0 : unit_width,
- counter->unit, csv_sep);
+ fputc('\n', output);
+ }
+}
- fprintf(output, "%*s",
- csv_output ? 0 : -25,
- perf_evsel__name(counter));
+static void print_no_aggr_metric(char *prefix)
+{
+ int cpu;
+ int nrcpus = 0;
+ struct perf_evsel *counter;
+ u64 ena, run, val;
+ double uval;
- if (counter->cgrp)
- fprintf(output, "%s%s",
- csv_sep, counter->cgrp->name);
+ nrcpus = evsel_list->cpus->nr;
+ for (cpu = 0; cpu < nrcpus; cpu++) {
+ bool first = true;
- print_running(run, ena);
- fputc('\n', output);
- continue;
+ if (prefix)
+ fputs(prefix, stat_config.output);
+ evlist__for_each(evsel_list, counter) {
+ if (first) {
+ aggr_printout(counter, cpu, 0);
+ first = false;
+ }
+ val = perf_counts(counter->counts, cpu, 0)->val;
+ ena = perf_counts(counter->counts, cpu, 0)->ena;
+ run = perf_counts(counter->counts, cpu, 0)->run;
+
+ uval = val * counter->scale;
+ printout(cpu, 0, counter, uval, prefix, run, ena, 1.0);
}
+ fputc('\n', stat_config.output);
+ }
+}
- uval = val * counter->scale;
- printout(cpu, 0, counter, uval);
- if (!csv_output)
- print_noise(counter, 1.0);
- print_running(run, ena);
+static int aggr_header_lens[] = {
+ [AGGR_CORE] = 18,
+ [AGGR_SOCKET] = 12,
+ [AGGR_NONE] = 6,
+ [AGGR_THREAD] = 24,
+ [AGGR_GLOBAL] = 0,
+};
- fputc('\n', output);
+static void print_metric_headers(char *prefix)
+{
+ struct perf_stat_output_ctx out;
+ struct perf_evsel *counter;
+ struct outstate os = {
+ .fh = stat_config.output
+ };
+
+ if (prefix)
+ fprintf(stat_config.output, "%s", prefix);
+
+ if (!csv_output)
+ fprintf(stat_config.output, "%*s",
+ aggr_header_lens[stat_config.aggr_mode], "");
+
+ /* Print metrics headers only */
+ evlist__for_each(evsel_list, counter) {
+ os.evsel = counter;
+ out.ctx = &os;
+ out.print_metric = print_metric_header;
+ out.new_line = new_line_metric;
+ os.evsel = counter;
+ perf_stat__print_shadow_stats(counter, 0,
+ 0,
+ &out);
}
+ fputc('\n', stat_config.output);
}
static void print_interval(char *prefix, struct timespec *ts)
@@ -1014,7 +1324,7 @@ static void print_interval(char *prefix, struct timespec *ts)
sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
- if (num_print_interval == 0 && !csv_output) {
+ if (num_print_interval == 0 && !csv_output && !metric_only) {
switch (stat_config.aggr_mode) {
case AGGR_SOCKET:
fprintf(output, "# time socket cpus counts %*s events\n", unit_width, "unit");
@@ -1101,6 +1411,17 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
else
print_header(argc, argv);
+ if (metric_only) {
+ static int num_print_iv;
+
+ if (num_print_iv == 0)
+ print_metric_headers(prefix);
+ if (num_print_iv++ == 25)
+ num_print_iv = 0;
+ if (stat_config.aggr_mode == AGGR_GLOBAL && prefix)
+ fprintf(stat_config.output, "%s", prefix);
+ }
+
switch (stat_config.aggr_mode) {
case AGGR_CORE:
case AGGR_SOCKET:
@@ -1113,10 +1434,16 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
case AGGR_GLOBAL:
evlist__for_each(evsel_list, counter)
print_counter_aggr(counter, prefix);
+ if (metric_only)
+ fputc('\n', stat_config.output);
break;
case AGGR_NONE:
- evlist__for_each(evsel_list, counter)
- print_counter(counter, prefix);
+ if (metric_only)
+ print_no_aggr_metric(prefix);
+ else {
+ evlist__for_each(evsel_list, counter)
+ print_counter(counter, prefix);
+ }
break;
case AGGR_UNSET:
default:
@@ -1237,6 +1564,8 @@ static const struct option stat_options[] = {
"aggregate counts per thread", AGGR_THREAD),
OPT_UINTEGER('D', "delay", &initial_delay,
"ms to wait before starting measurement after program start"),
+ OPT_BOOLEAN(0, "metric-only", &metric_only,
+ "Only print computed metrics. No raw values"),
OPT_END()
};
@@ -1435,7 +1764,7 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
*/
static int add_default_attributes(void)
{
- struct perf_event_attr default_attrs[] = {
+ struct perf_event_attr default_attrs0[] = {
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
@@ -1443,8 +1772,14 @@ static int add_default_attributes(void)
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
+};
+ struct perf_event_attr frontend_attrs[] = {
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
+};
+ struct perf_event_attr backend_attrs[] = {
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
+};
+ struct perf_event_attr default_attrs1[] = {
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
@@ -1561,7 +1896,19 @@ static int add_default_attributes(void)
}
if (!evsel_list->nr_entries) {
- if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
+ if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
+ return -1;
+ if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
+ if (perf_evlist__add_default_attrs(evsel_list,
+ frontend_attrs) < 0)
+ return -1;
+ }
+ if (pmu_have_event("cpu", "stalled-cycles-backend")) {
+ if (perf_evlist__add_default_attrs(evsel_list,
+ backend_attrs) < 0)
+ return -1;
+ }
+ if (perf_evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
return -1;
}
@@ -1825,9 +2172,11 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
if (evsel_list == NULL)
return -ENOMEM;
+ parse_events__shrink_config_terms();
argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
(const char **) stat_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
+ perf_stat__init_shadow_stats();
if (csv_sep) {
csv_output = true;
@@ -1858,6 +2207,16 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
goto out;
}
+ if (metric_only && stat_config.aggr_mode == AGGR_THREAD) {
+ fprintf(stderr, "--metric-only is not supported with --per-thread\n");
+ goto out;
+ }
+
+ if (metric_only && run_count > 1) {
+ fprintf(stderr, "--metric-only is not supported with -r\n");
+ goto out;
+ }
+
if (output_fd < 0) {
fprintf(stderr, "argument to --log-fd must be a > 0\n");
parse_options_usage(stat_usage, stat_options, "log-fd", 0);
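print_metric_csv() above splits a formatted metric such as "    1.23 insn per cycle" into a numeric field and a unit field, so CSV consumers get clean columns. A minimal sketch of that splitting, assuming ',' as the CSV separator:

#include <ctype.h>
#include <stdio.h>

static void metric_to_csv(FILE *out, const char *fmt, const char *unit,
			  double val)
{
	char buf[64], *vals, *ends;

	snprintf(buf, sizeof(buf), fmt, val);
	vals = buf;
	while (isspace((unsigned char)*vals))	/* strip leading blanks */
		vals++;
	ends = vals;
	while (isdigit((unsigned char)*ends) || *ends == '.')
		ends++;
	*ends = '\0';				/* cut after the number */
	while (isspace((unsigned char)*unit))
		unit++;
	fprintf(out, ",%s,%s", vals, unit);
}

int main(void)
{
	metric_to_csv(stdout, "%8.2f", " insn per cycle", 1.23);
	putchar('\n');				/* prints ",1.23,insn per cycle" */
	return 0;
}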
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index bf01cbb0ef23..94af190f6843 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -252,7 +252,8 @@ static void perf_top__print_sym_table(struct perf_top *top)
char bf[160];
int printed = 0;
const int win_width = top->winsize.ws_col - 1;
- struct hists *hists = evsel__hists(top->sym_evsel);
+ struct perf_evsel *evsel = top->sym_evsel;
+ struct hists *hists = evsel__hists(evsel);
puts(CONSOLE_CLEAR);
@@ -288,7 +289,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
}
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists, NULL);
+ perf_evsel__output_resort(evsel, NULL);
hists__output_recalc_col_len(hists, top->print_entries - printed);
putchar('\n');
@@ -540,6 +541,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
static void perf_top__sort_new_samples(void *arg)
{
struct perf_top *t = arg;
+ struct perf_evsel *evsel = t->sym_evsel;
struct hists *hists;
perf_top__reset_sample_counters(t);
@@ -547,7 +549,7 @@ static void perf_top__sort_new_samples(void *arg)
if (t->evlist->selected != NULL)
t->sym_evsel = t->evlist->selected;
- hists = evsel__hists(t->sym_evsel);
+ hists = evsel__hists(evsel);
if (t->evlist->enabled) {
if (t->zero) {
@@ -559,7 +561,7 @@ static void perf_top__sort_new_samples(void *arg)
}
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists, NULL);
+ perf_evsel__output_resort(evsel, NULL);
}
static void *display_thread_tui(void *arg)
@@ -1063,7 +1065,7 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
return parse_callchain_top_opt(arg);
}
-static int perf_top_config(const char *var, const char *value, void *cb)
+static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
{
if (!strcmp(var, "top.call-graph"))
var = "call-graph.record-mode"; /* fall-through */
@@ -1072,7 +1074,7 @@ static int perf_top_config(const char *var, const char *value, void *cb)
return 0;
}
- return perf_default_config(var, value, cb);
+ return 0;
}
static int
@@ -1212,6 +1214,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
parse_branch_stack),
OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
"Show raw trace event output (do not use print fmt or plugins)"),
+ OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
+ "Show entries in a hierarchy"),
OPT_END()
};
const char * const top_usage[] = {
@@ -1239,10 +1243,30 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
goto out_delete_evlist;
}
+ if (symbol_conf.report_hierarchy) {
+ /* disable incompatible options */
+ symbol_conf.event_group = false;
+ symbol_conf.cumulate_callchain = false;
+
+ if (field_order) {
+ pr_err("Error: --hierarchy and --fields options cannot be used together\n");
+ parse_options_usage(top_usage, options, "fields", 0);
+ parse_options_usage(NULL, options, "hierarchy", 0);
+ goto out_delete_evlist;
+ }
+ }
+
sort__mode = SORT_MODE__TOP;
/* display thread wants entries to be collapsed in a different tree */
sort__need_collapse = 1;
+ if (top.use_stdio)
+ use_browser = 0;
+ else if (top.use_tui)
+ use_browser = 1;
+
+ setup_browser(false);
+
if (setup_sorting(top.evlist) < 0) {
if (sort_order)
parse_options_usage(top_usage, options, "s", 1);
@@ -1252,13 +1276,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
goto out_delete_evlist;
}
- if (top.use_stdio)
- use_browser = 0;
- else if (top.use_tui)
- use_browser = 1;
-
- setup_browser(false);
-
status = target__validate(target);
if (status) {
target__strerror(target, status, errbuf, BUFSIZ);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 20916dd77aac..8dc98c598b1a 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -33,6 +33,7 @@
#include "util/stat.h"
#include "trace-event.h"
#include "util/parse-events.h"
+#include "util/bpf-loader.h"
#include <libaudit.h>
#include <stdlib.h>
@@ -1724,8 +1725,12 @@ static int trace__read_syscall_info(struct trace *trace, int id)
sc->args = sc->tp_format->format.fields;
sc->nr_args = sc->tp_format->format.nr_fields;
- /* drop nr field - not relevant here; does not exist on older kernels */
- if (sc->args && strcmp(sc->args->name, "nr") == 0) {
+ /*
+	 * Check for and discard the first field when it is the syscall
+	 * number: '__syscall_nr' on newer kernels, plain 'nr' on older
+	 * ones. It is redundant here, so drop it if present.
+ */
+ if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
sc->args = sc->args->next;
--sc->nr_args;
}
@@ -2177,6 +2182,37 @@ out_dump:
return 0;
}
+static void bpf_output__printer(enum binary_printer_ops op,
+ unsigned int val, void *extra)
+{
+ FILE *output = extra;
+ unsigned char ch = (unsigned char)val;
+
+ switch (op) {
+ case BINARY_PRINT_CHAR_DATA:
+ fprintf(output, "%c", isprint(ch) ? ch : '.');
+ break;
+ case BINARY_PRINT_DATA_BEGIN:
+ case BINARY_PRINT_LINE_BEGIN:
+ case BINARY_PRINT_ADDR:
+ case BINARY_PRINT_NUM_DATA:
+ case BINARY_PRINT_NUM_PAD:
+ case BINARY_PRINT_SEP:
+ case BINARY_PRINT_CHAR_PAD:
+ case BINARY_PRINT_LINE_END:
+ case BINARY_PRINT_DATA_END:
+ default:
+ break;
+ }
+}
+
+static void bpf_output__fprintf(struct trace *trace,
+ struct perf_sample *sample)
+{
+ print_binary(sample->raw_data, sample->raw_size, 8,
+ bpf_output__printer, trace->output);
+}
+
static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -2189,7 +2225,9 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
fprintf(trace->output, "%s:", evsel->name);
- if (evsel->tp_format) {
+ if (perf_evsel__is_bpf_output(evsel)) {
+ bpf_output__fprintf(trace, sample);
+ } else if (evsel->tp_format) {
event_format__fprintf(evsel->tp_format, sample->cpu,
sample->raw_data, sample->raw_size,
trace->output);
@@ -2586,6 +2624,16 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_open;
+ err = bpf__apply_obj_config();
+ if (err) {
+ char errbuf[BUFSIZ];
+
+ bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
+ pr_err("ERROR: Apply config to BPF failed: %s\n",
+ errbuf);
+ goto out_error_open;
+ }
+
/*
* Better not use !target__has_task() here because we need to cover the
* case where no threads were specified in the command line, but a
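trace__read_syscall_info() above drops the leading syscall-number field before the arguments are formatted. A standalone sketch of that first-field skip, over a hypothetical linked list of format fields:

#include <stdio.h>
#include <string.h>

struct field {			/* stand-in for the tracepoint format field */
	const char *name;
	struct field *next;
};

static struct field *skip_syscall_nr(struct field *args, int *nr_args)
{
	if (args && (!strcmp(args->name, "__syscall_nr") ||
		     !strcmp(args->name, "nr"))) {
		args = args->next;	/* drop the syscall-number field */
		--*nr_args;
	}
	return args;
}

int main(void)
{
	struct field fd = { "fd", NULL };
	struct field nr = { "__syscall_nr", &fd };
	int nr_args = 2;
	struct field *args = skip_syscall_nr(&nr, &nr_args);

	printf("%s %d\n", args->name, nr_args);	/* "fd 1" */
	return 0;
}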
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 511141b102e8..eca6a912e8c2 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -61,50 +61,45 @@ endif
ifeq ($(LIBUNWIND_LIBS),)
NO_LIBUNWIND := 1
-else
- #
- # For linking with debug library, run like:
- #
- # make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
- #
- ifdef LIBUNWIND_DIR
- LIBUNWIND_CFLAGS = -I$(LIBUNWIND_DIR)/include
- LIBUNWIND_LDFLAGS = -L$(LIBUNWIND_DIR)/lib
- endif
- LIBUNWIND_LDFLAGS += $(LIBUNWIND_LIBS)
-
- # Set per-feature check compilation flags
- FEATURE_CHECK_CFLAGS-libunwind = $(LIBUNWIND_CFLAGS)
- FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS)
- FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
- FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS)
endif
+#
+# For linking with debug library, run like:
+#
+# make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
+#
+ifdef LIBUNWIND_DIR
+ LIBUNWIND_CFLAGS = -I$(LIBUNWIND_DIR)/include
+ LIBUNWIND_LDFLAGS = -L$(LIBUNWIND_DIR)/lib
+endif
+LIBUNWIND_LDFLAGS += $(LIBUNWIND_LIBS)
+
+# Set per-feature check compilation flags
+FEATURE_CHECK_CFLAGS-libunwind = $(LIBUNWIND_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS)
+FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS)
ifeq ($(NO_PERF_REGS),0)
CFLAGS += -DHAVE_PERF_REGS_SUPPORT
endif
-ifndef NO_LIBELF
- # for linking with debug library, run like:
- # make DEBUG=1 LIBDW_DIR=/opt/libdw/
- ifdef LIBDW_DIR
- LIBDW_CFLAGS := -I$(LIBDW_DIR)/include
- LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib
- endif
- FEATURE_CHECK_CFLAGS-libdw-dwarf-unwind := $(LIBDW_CFLAGS)
- FEATURE_CHECK_LDFLAGS-libdw-dwarf-unwind := $(LIBDW_LDFLAGS) -ldw
+# for linking with debug library, run like:
+# make DEBUG=1 LIBDW_DIR=/opt/libdw/
+ifdef LIBDW_DIR
+ LIBDW_CFLAGS := -I$(LIBDW_DIR)/include
+ LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib
endif
+FEATURE_CHECK_CFLAGS-libdw-dwarf-unwind := $(LIBDW_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libdw-dwarf-unwind := $(LIBDW_LDFLAGS) -ldw
-ifdef LIBBABELTRACE
- # for linking with debug library, run like:
- # make DEBUG=1 LIBBABELTRACE_DIR=/opt/libbabeltrace/
- ifdef LIBBABELTRACE_DIR
- LIBBABELTRACE_CFLAGS := -I$(LIBBABELTRACE_DIR)/include
- LIBBABELTRACE_LDFLAGS := -L$(LIBBABELTRACE_DIR)/lib
- endif
- FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
- FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
+# for linking with debug library, run like:
+# make DEBUG=1 LIBBABELTRACE_DIR=/opt/libbabeltrace/
+ifdef LIBBABELTRACE_DIR
+ LIBBABELTRACE_CFLAGS := -I$(LIBBABELTRACE_DIR)/include
+ LIBBABELTRACE_LDFLAGS := -L$(LIBBABELTRACE_DIR)/lib
endif
+FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/arch/$(ARCH)/include/uapi -I$(srctree)/include/uapi
# include ARCH specific config
@@ -145,28 +140,26 @@ ifdef PARSER_DEBUG
$(call detected_var,PARSER_DEBUG_FLEX)
endif
-ifndef NO_LIBPYTHON
- # Try different combinations to accommodate systems that only have
- # python[2][-config] in weird combinations but always preferring
- # python2 and python2-config as per pep-0394. If we catch a
- # python[-config] in version 3, the version check will kill it.
- PYTHON2 := $(if $(call get-executable,python2),python2,python)
- override PYTHON := $(call get-executable-or-default,PYTHON,$(PYTHON2))
- PYTHON2_CONFIG := \
- $(if $(call get-executable,$(PYTHON)-config),$(PYTHON)-config,python-config)
- override PYTHON_CONFIG := \
- $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON2_CONFIG))
+# Try different combinations to accommodate systems that only have
+# python[2][-config] in weird combinations but always preferring
+# python2 and python2-config as per pep-0394. If we catch a
+# python[-config] in version 3, the version check will kill it.
+PYTHON2 := $(if $(call get-executable,python2),python2,python)
+override PYTHON := $(call get-executable-or-default,PYTHON,$(PYTHON2))
+PYTHON2_CONFIG := \
+ $(if $(call get-executable,$(PYTHON)-config),$(PYTHON)-config,python-config)
+override PYTHON_CONFIG := \
+ $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON2_CONFIG))
- PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
+PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
- PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
- PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
+PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
+PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
- FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
- FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
- FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
- FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
-endif
+FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
+FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
+FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
+FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
CFLAGS += -fno-omit-frame-pointer
CFLAGS += -ggdb3
@@ -335,6 +328,13 @@ ifndef NO_LIBELF
endif # NO_LIBBPF
endif # NO_LIBELF
+ifdef PERF_HAVE_JITDUMP
+ ifndef NO_DWARF
+ $(call detected,CONFIG_JITDUMP)
+ CFLAGS += -DHAVE_JITDUMP
+ endif
+endif
+
ifeq ($(ARCH),powerpc)
ifndef NO_DWARF
CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
@@ -411,6 +411,17 @@ ifndef NO_LIBAUDIT
endif
endif
+ifndef NO_LIBCRYPTO
+ ifneq ($(feature-libcrypto), 1)
+    msg := $(warning No libcrypto.h found, disabling jitted code injection, please install libssl-devel or libssl-dev);
+ NO_LIBCRYPTO := 1
+ else
+ CFLAGS += -DHAVE_LIBCRYPTO_SUPPORT
+ EXTLIBS += -lcrypto
+ $(call detected,CONFIG_CRYPTO)
+ endif
+endif
+
ifdef NO_NEWT
NO_SLANG=1
endif
diff --git a/tools/perf/jvmti/Makefile b/tools/perf/jvmti/Makefile
new file mode 100644
index 000000000000..5ce61a1bda9c
--- /dev/null
+++ b/tools/perf/jvmti/Makefile
@@ -0,0 +1,89 @@
+ARCH=$(shell uname -m)
+
+ifeq ($(ARCH), x86_64)
+JARCH=amd64
+endif
+ifeq ($(ARCH), armv7l)
+JARCH=armhf
+endif
+ifeq ($(ARCH), armv6l)
+JARCH=armhf
+endif
+ifeq ($(ARCH), aarch64)
+JARCH=aarch64
+endif
+ifeq ($(ARCH), ppc64)
+JARCH=powerpc
+endif
+ifeq ($(ARCH), ppc64le)
+JARCH=powerpc
+endif
+
+DESTDIR=/usr/local
+
+VERSION=1
+REVISION=0
+AGE=0
+
+LN=ln -sf
+RM=rm
+
+SLIBJVMTI=libjvmti.so.$(VERSION).$(REVISION).$(AGE)
+VLIBJVMTI=libjvmti.so.$(VERSION)
+SLDFLAGS=-shared -Wl,-soname -Wl,$(VLIBJVMTI)
+SOLIBEXT=so
+
+# The following works at least on Fedora 23; you may need the next
+# line for other distros.
+ifneq (,$(wildcard /usr/sbin/update-java-alternatives))
+JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | cut -d ' ' -f 3)
+else
+ ifneq (,$(wildcard /usr/sbin/alternatives))
+ JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
+ endif
+endif
+ifndef JDIR
+$(error Could not find alternatives command, you need to set JDIR= to point to the root of your Java directory)
+else
+ ifeq (,$(wildcard $(JDIR)/include/jvmti.h))
  $(error the openjdk development package appears to be missing, install it and try again)
+ endif
+endif
+$(info Using Java from $(JDIR))
+# -lrt required in 32-bit mode for clock_gettime()
+LIBS=-lelf -lrt
+INCDIR=-I $(JDIR)/include -I $(JDIR)/include/linux
+
+TARGETS=$(SLIBJVMTI)
+
+SRCS=libjvmti.c jvmti_agent.c
+OBJS=$(SRCS:.c=.o)
+SOBJS=$(OBJS:.o=.lo)
+OPT=-O2 -g -Werror -Wall
+
+CFLAGS=$(INCDIR) $(OPT)
+
+all: $(TARGETS)
+
+.c.o:
+ $(CC) $(CFLAGS) -c $*.c
+.c.lo:
+ $(CC) -fPIC -DPIC $(CFLAGS) -c $*.c -o $*.lo
+
+$(OBJS) $(SOBJS): Makefile jvmti_agent.h ../util/jitdump.h
+
+$(SLIBJVMTI): $(SOBJS)
+ $(CC) $(CFLAGS) $(SLDFLAGS) -o $@ $(SOBJS) $(LIBS)
+ $(LN) $@ libjvmti.$(SOLIBEXT)
+
+clean:
+ $(RM) -f *.o *.so.* *.so *.lo
+
+install:
+ -mkdir -p $(DESTDIR)/lib
+ install -m 755 $(SLIBJVMTI) $(DESTDIR)/lib/
+ (cd $(DESTDIR)/lib; $(LN) $(SLIBJVMTI) $(VLIBJVMTI))
+ (cd $(DESTDIR)/lib; $(LN) $(SLIBJVMTI) libjvmti.$(SOLIBEXT))
+ ldconfig
+
+.SUFFIXES: .c .S .o .lo
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
new file mode 100644
index 000000000000..6461e02ab940
--- /dev/null
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -0,0 +1,465 @@
+/*
+ * jvmti_agent.c: JVMTI agent interface
+ *
+ * Adapted from the Oprofile code in opagent.c:
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright 2007 OProfile authors
+ * Jens Wilke
+ * Daniel Hansel
+ * Copyright IBM Corporation 2007
+ */
+#include <sys/types.h>
+#include <sys/stat.h> /* for mkdir() */
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <sys/mman.h>
+#include <syscall.h> /* for gettid() */
+#include <err.h>
+
+#include "jvmti_agent.h"
+#include "../util/jitdump.h"
+
+#define JIT_LANG "java"
+
+static char jit_path[PATH_MAX];
+static void *marker_addr;
+
+/*
+ * padding buffer
+ */
+static const char pad_bytes[7];
+
+static inline pid_t gettid(void)
+{
+ return (pid_t)syscall(__NR_gettid);
+}
+
+static int get_e_machine(struct jitheader *hdr)
+{
+ ssize_t sret;
+ char id[16];
+ int fd, ret = -1;
+ int m = -1;
+ struct {
+ uint16_t e_type;
+ uint16_t e_machine;
+ } info;
+
+ fd = open("/proc/self/exe", O_RDONLY);
+ if (fd == -1)
+ return -1;
+
+ sret = read(fd, id, sizeof(id));
+ if (sret != sizeof(id))
+ goto error;
+
+ /* check ELF signature */
+ if (id[0] != 0x7f || id[1] != 'E' || id[2] != 'L' || id[3] != 'F')
+ goto error;
+
+ sret = read(fd, &info, sizeof(info));
+ if (sret != sizeof(info))
+ goto error;
+
+ m = info.e_machine;
+ if (m < 0)
+ m = 0; /* ELF EM_NONE */
+
+ hdr->elf_mach = m;
+ ret = 0;
+error:
+ close(fd);
+ return ret;
+}
+
+#define NSEC_PER_SEC 1000000000
+static int perf_clk_id = CLOCK_MONOTONIC;
+
+static inline uint64_t
+timespec_to_ns(const struct timespec *ts)
+{
+ return ((uint64_t) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+}
+
+static inline uint64_t
+perf_get_timestamp(void)
+{
+ struct timespec ts;
+ int ret;
+
+ ret = clock_gettime(perf_clk_id, &ts);
+ if (ret)
+ return 0;
+
+ return timespec_to_ns(&ts);
+}
+
+static int
+debug_cache_init(void)
+{
+ char str[32];
+ char *base, *p;
+ struct tm tm;
+ time_t t;
+ int ret;
+
+ time(&t);
+ localtime_r(&t, &tm);
+
+ base = getenv("JITDUMPDIR");
+ if (!base)
+ base = getenv("HOME");
+ if (!base)
+ base = ".";
+
+ strftime(str, sizeof(str), JIT_LANG"-jit-%Y%m%d", &tm);
+
+ snprintf(jit_path, PATH_MAX - 1, "%s/.debug/", base);
+
+ ret = mkdir(jit_path, 0755);
+ if (ret == -1) {
+ if (errno != EEXIST) {
+ warn("jvmti: cannot create jit cache dir %s", jit_path);
+ return -1;
+ }
+ }
+
+ snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit", base);
+ ret = mkdir(jit_path, 0755);
+ if (ret == -1) {
+ if (errno != EEXIST) {
+ warn("cannot create jit cache dir %s", jit_path);
+ return -1;
+ }
+ }
+
+ snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit/%s.XXXXXXXX", base, str);
+
+ p = mkdtemp(jit_path);
+ if (p != jit_path) {
+ warn("cannot create jit cache dir %s", jit_path);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+perf_open_marker_file(int fd)
+{
+ long pgsz;
+
+ pgsz = sysconf(_SC_PAGESIZE);
+ if (pgsz == -1)
+ return -1;
+
+	/*
+	 * We mmap the jitdump to create an MMAP record in the perf.data file.
+	 * The mmap is captured either live (perf record running when we mmap)
+	 * or in deferred mode, via /proc/PID/maps.
+	 * The MMAP record is used as a marker of a jitdump file carrying more
+	 * metadata about the jitted code. perf report/annotate detect this
+	 * special filename and process the jitdump file accordingly.
+	 *
+	 * The mapping must be PROT_EXEC to ensure it is captured by perf
+	 * record even when the -d option is not used.
+	 */
+ marker_addr = mmap(NULL, pgsz, PROT_READ|PROT_EXEC, MAP_PRIVATE, fd, 0);
+ return (marker_addr == MAP_FAILED) ? -1 : 0;
+}
+
+static void
+perf_close_marker_file(void)
+{
+ long pgsz;
+
+ if (!marker_addr)
+ return;
+
+ pgsz = sysconf(_SC_PAGESIZE);
+ if (pgsz == -1)
+ return;
+
+ munmap(marker_addr, pgsz);
+}
+
+void *jvmti_open(void)
+{
+ int pad_cnt;
+ char dump_path[PATH_MAX];
+ struct jitheader header;
+ int fd;
+ FILE *fp;
+
+ /*
+ * check if clockid is supported
+ */
+ if (!perf_get_timestamp())
+ warnx("jvmti: kernel does not support %d clock id", perf_clk_id);
+
+ memset(&header, 0, sizeof(header));
+
+ debug_cache_init();
+
+ /*
+ * jitdump file name
+ */
+ snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+
+ fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
+ if (fd == -1)
+ return NULL;
+
+ /*
+	 * create a perf.data marker for the jitdump file
+ */
+ if (perf_open_marker_file(fd)) {
+ warnx("jvmti: failed to create marker file");
+ return NULL;
+ }
+
+ fp = fdopen(fd, "w+");
+ if (!fp) {
+ warn("jvmti: cannot create %s", dump_path);
+ close(fd);
+ goto error;
+ }
+
+ warnx("jvmti: jitdump in %s", dump_path);
+
+ if (get_e_machine(&header)) {
+ warn("get_e_machine failed\n");
+ goto error;
+ }
+
+ header.magic = JITHEADER_MAGIC;
+ header.version = JITHEADER_VERSION;
+ header.total_size = sizeof(header);
+ header.pid = getpid();
+
+ /* calculate amount of padding '\0' */
+ pad_cnt = PADDING_8ALIGNED(header.total_size);
+ header.total_size += pad_cnt;
+
+ header.timestamp = perf_get_timestamp();
+
+ if (!fwrite(&header, sizeof(header), 1, fp)) {
+ warn("jvmti: cannot write dumpfile header");
+ goto error;
+ }
+
+ /* write padding '\0' if necessary */
+ if (pad_cnt && !fwrite(pad_bytes, pad_cnt, 1, fp)) {
+ warn("jvmti: cannot write dumpfile header padding");
+ goto error;
+ }
+
+ return fp;
+error:
+	if (fp)
+		fclose(fp);
+ return NULL;
+}
+
+int
+jvmti_close(void *agent)
+{
+ struct jr_code_close rec;
+ FILE *fp = agent;
+
+ if (!fp) {
+		warnx("jvmti: invalid fd in close_agent");
+ return -1;
+ }
+
+ rec.p.id = JIT_CODE_CLOSE;
+ rec.p.total_size = sizeof(rec);
+
+ rec.p.timestamp = perf_get_timestamp();
+
+ if (!fwrite(&rec, sizeof(rec), 1, fp))
+ return -1;
+
+ fclose(fp);
+
+ fp = NULL;
+
+ perf_close_marker_file();
+
+ return 0;
+}
+
+int
+jvmti_write_code(void *agent, char const *sym,
+ uint64_t vma, void const *code, unsigned int const size)
+{
+ static int code_generation = 1;
+ struct jr_code_load rec;
+ size_t sym_len;
+ size_t padding_count;
+ FILE *fp = agent;
+ int ret = -1;
+
+ /* don't care about 0 length function, no samples */
+ if (size == 0)
+ return 0;
+
+ if (!fp) {
+ warnx("jvmti: invalid fd in write_native_code");
+ return -1;
+ }
+
+ sym_len = strlen(sym) + 1;
+
+ rec.p.id = JIT_CODE_LOAD;
+ rec.p.total_size = sizeof(rec) + sym_len;
+ padding_count = PADDING_8ALIGNED(rec.p.total_size);
+	rec.p.total_size += padding_count;
+ rec.p.timestamp = perf_get_timestamp();
+
+ rec.code_size = size;
+ rec.vma = vma;
+ rec.code_addr = vma;
+ rec.pid = getpid();
+ rec.tid = gettid();
+
+ if (code)
+ rec.p.total_size += size;
+
+ /*
+	 * If the JVM is multi-threaded, multiple concurrent calls to the
+	 * agent may be possible, so protect file writes.
+ */
+ flockfile(fp);
+
+ /*
+ * get code index inside lock to avoid race condition
+ */
+ rec.code_index = code_generation++;
+
+ ret = fwrite_unlocked(&rec, sizeof(rec), 1, fp);
+ fwrite_unlocked(sym, sym_len, 1, fp);
+
+ if (padding_count)
+ fwrite_unlocked(pad_bytes, padding_count, 1, fp);
+
+ if (code)
+ fwrite_unlocked(code, size, 1, fp);
+
+ funlockfile(fp);
+
+ ret = 0;
+
+ return ret;
+}
+
+int
+jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
+ jvmti_line_info_t *li, int nr_lines)
+{
+ struct jr_code_debug_info rec;
+ size_t sret, len, size, flen;
+ size_t padding_count;
+ uint64_t addr;
+ const char *fn = file;
+ FILE *fp = agent;
+ int i;
+
+ /*
+ * no entry to write
+ */
+ if (!nr_lines)
+ return 0;
+
+ if (!fp) {
+ warnx("jvmti: invalid fd in write_debug_info");
+ return -1;
+ }
+
+ flen = strlen(file) + 1;
+
+ rec.p.id = JIT_CODE_DEBUG_INFO;
+ size = sizeof(rec);
+ rec.p.timestamp = perf_get_timestamp();
+ rec.code_addr = (uint64_t)(uintptr_t)code;
+ rec.nr_entry = nr_lines;
+
+ /*
+ * on disk source line info layout:
+ * uint64_t : addr
+ * int : line number
+ * int : column discriminator
+ * file[] : source file name
+ * padding : pad to multiple of 8 bytes
+ */
+ size += nr_lines * sizeof(struct debug_entry);
+ size += flen * nr_lines;
+ /*
+ * pad to 8 bytes
+ */
+ padding_count = PADDING_8ALIGNED(size);
+
+ rec.p.total_size = size + padding_count;
+
+ /*
+	 * If the JVM is multi-threaded, multiple concurrent calls to the
+	 * agent may be possible, so protect file writes.
+ */
+ flockfile(fp);
+
+ sret = fwrite_unlocked(&rec, sizeof(rec), 1, fp);
+ if (sret != 1)
+ goto error;
+
+ for (i = 0; i < nr_lines; i++) {
+
+ addr = (uint64_t)li[i].pc;
+ len = sizeof(addr);
+ sret = fwrite_unlocked(&addr, len, 1, fp);
+ if (sret != 1)
+ goto error;
+
+ len = sizeof(li[0].line_number);
+ sret = fwrite_unlocked(&li[i].line_number, len, 1, fp);
+ if (sret != 1)
+ goto error;
+
+ len = sizeof(li[0].discrim);
+ sret = fwrite_unlocked(&li[i].discrim, len, 1, fp);
+ if (sret != 1)
+ goto error;
+
+ sret = fwrite_unlocked(fn, flen, 1, fp);
+ if (sret != 1)
+ goto error;
+ }
+ if (padding_count)
+ sret = fwrite_unlocked(pad_bytes, padding_count, 1, fp);
+ if (sret != 1)
+ goto error;
+
+ funlockfile(fp);
+ return 0;
+error:
+ funlockfile(fp);
+ return -1;
+}
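Records in the jitdump file are padded to an 8-byte boundary via PADDING_8ALIGNED() before being written. A sketch with an assumed, arithmetically equivalent definition of that macro (the real one lives in util/jitdump.h):

#include <stdint.h>
#include <stdio.h>

/* assumed definition: number of '\0' bytes needed to reach the next
 * multiple of 8 */
#define PADDING_8ALIGNED(x) ((((x) + 7) & ~7u) - (x))

int main(void)
{
	uint32_t sz = 61;	/* e.g. a record header plus symbol name */

	printf("pad %u bytes -> total %u\n",
	       (unsigned)PADDING_8ALIGNED(sz),
	       sz + (unsigned)PADDING_8ALIGNED(sz));	/* pad 3 -> total 64 */
	return 0;
}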
diff --git a/tools/perf/jvmti/jvmti_agent.h b/tools/perf/jvmti/jvmti_agent.h
new file mode 100644
index 000000000000..bedf5d0ba9ff
--- /dev/null
+++ b/tools/perf/jvmti/jvmti_agent.h
@@ -0,0 +1,36 @@
+#ifndef __JVMTI_AGENT_H__
+#define __JVMTI_AGENT_H__
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <jvmti.h>
+
+#define __unused __attribute__((unused))
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+typedef struct {
+ unsigned long pc;
+ int line_number;
+ int discrim; /* discriminator -- 0 for now */
+} jvmti_line_info_t;
+
+void *jvmti_open(void);
+int jvmti_close(void *agent);
+int jvmti_write_code(void *agent, char const *symbol_name,
+ uint64_t vma, void const *code,
+ const unsigned int code_size);
+
+int jvmti_write_debug_info(void *agent,
+ uint64_t code,
+ const char *file,
+ jvmti_line_info_t *li,
+ int nr_lines);
+
+#if defined(__cplusplus)
+}
+
+#endif
+#endif /* __JVMTI_H__ */
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
new file mode 100644
index 000000000000..ac12e4b91a92
--- /dev/null
+++ b/tools/perf/jvmti/libjvmti.c
@@ -0,0 +1,304 @@
+#include <sys/types.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <err.h>
+#include <jvmti.h>
+#include <jvmticmlr.h>
+#include <limits.h>
+
+#include "jvmti_agent.h"
+
+static int has_line_numbers;
+void *jvmti_agent;
+
+static jvmtiError
+do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
+ jvmti_line_info_t *tab, jint *nr)
+{
+ jint i, lines = 0;
+ jint nr_lines = 0;
+ jvmtiLineNumberEntry *loc_tab = NULL;
+ jvmtiError ret;
+
+ ret = (*jvmti)->GetLineNumberTable(jvmti, m, &nr_lines, &loc_tab);
+ if (ret != JVMTI_ERROR_NONE)
+ return ret;
+
+ for (i = 0; i < nr_lines; i++) {
+ if (loc_tab[i].start_location < bci) {
+ tab[lines].pc = (unsigned long)pc;
+ tab[lines].line_number = loc_tab[i].line_number;
+ tab[lines].discrim = 0; /* not yet used */
+ lines++;
+ } else {
+ break;
+ }
+ }
+ (*jvmti)->Deallocate(jvmti, (unsigned char *)loc_tab);
+ *nr = lines;
+ return JVMTI_ERROR_NONE;
+}
+
+static jvmtiError
+get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **tab, int *nr_lines)
+{
+ const jvmtiCompiledMethodLoadRecordHeader *hdr;
+ jvmtiCompiledMethodLoadInlineRecord *rec;
+ jvmtiLineNumberEntry *lne = NULL;
+ PCStackInfo *c;
+ jint nr, ret;
+ int nr_total = 0;
+ int i, lines_total = 0;
+
+ if (!(tab && nr_lines))
+ return JVMTI_ERROR_NULL_POINTER;
+
+ /*
+ * Phase 1 -- get the number of lines necessary
+ */
+ for (hdr = compile_info; hdr != NULL; hdr = hdr->next) {
+ if (hdr->kind == JVMTI_CMLR_INLINE_INFO) {
+ rec = (jvmtiCompiledMethodLoadInlineRecord *)hdr;
+ for (i = 0; i < rec->numpcs; i++) {
+ c = rec->pcinfo + i;
+ nr = 0;
+ /*
+				 * Unfortunately, we need a table just to get the number of lines!
+ */
+ ret = (*jvmti)->GetLineNumberTable(jvmti, c->methods[0], &nr, &lne);
+ if (ret == JVMTI_ERROR_NONE) {
+ /* free what was allocated for nothing */
+ (*jvmti)->Deallocate(jvmti, (unsigned char *)lne);
+ nr_total += (int)nr;
+ }
+ }
+ }
+ }
+
+ if (nr_total == 0)
+ return JVMTI_ERROR_NOT_FOUND;
+
+ /*
+ * Phase 2 -- allocate big enough line table
+ */
+ *tab = malloc(nr_total * sizeof(**tab));
+ if (!*tab)
+ return JVMTI_ERROR_OUT_OF_MEMORY;
+
+ for (hdr = compile_info; hdr != NULL; hdr = hdr->next) {
+ if (hdr->kind == JVMTI_CMLR_INLINE_INFO) {
+ rec = (jvmtiCompiledMethodLoadInlineRecord *)hdr;
+ for (i = 0; i < rec->numpcs; i++) {
+ c = rec->pcinfo + i;
+ nr = 0;
+ ret = do_get_line_numbers(jvmti, c->pc,
+ c->methods[0],
+ c->bcis[0],
+ *tab + lines_total,
+ &nr);
+ if (ret == JVMTI_ERROR_NONE)
+ lines_total += nr;
+ }
+ }
+ }
+ *nr_lines = lines_total;
+ return JVMTI_ERROR_NONE;
+}
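get_line_numbers() above uses a two-phase build: pass 1 only counts line entries across the inline records so a single exact-size allocation suffices, pass 2 fills the table. A generic sketch of that pattern, with hypothetical count/fill helpers standing in for the JVMTI queries:

#include <stdio.h>
#include <stdlib.h>

static int count_items(void) { return 4; }	/* phase 1: size the table */

static void fill_items(int *tab, int n)		/* phase 2: populate it */
{
	for (int i = 0; i < n; i++)
		tab[i] = i;
}

static int build_table(int **tab, int *nr)
{
	int total = count_items();

	if (total == 0)
		return -1;			/* nothing to record */
	*tab = malloc(total * sizeof(**tab));
	if (!*tab)
		return -1;
	fill_items(*tab, total);
	*nr = total;
	return 0;
}

int main(void)
{
	int *tab, nr;

	if (!build_table(&tab, &nr)) {
		printf("%d entries\n", nr);
		free(tab);
	}
	return 0;
}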
+
+static void JNICALL
+compiled_method_load_cb(jvmtiEnv *jvmti,
+ jmethodID method,
+ jint code_size,
+ void const *code_addr,
+ jint map_length,
+ jvmtiAddrLocationMap const *map,
+ const void *compile_info)
+{
+ jvmti_line_info_t *line_tab = NULL;
+ jclass decl_class;
+ char *class_sign = NULL;
+ char *func_name = NULL;
+ char *func_sign = NULL;
+	char *file_name = NULL;
+ char fn[PATH_MAX];
+ uint64_t addr = (uint64_t)(uintptr_t)code_addr;
+ jvmtiError ret;
+ int nr_lines = 0; /* in line_tab[] */
+ size_t len;
+
+ ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method,
+ &decl_class);
+ if (ret != JVMTI_ERROR_NONE) {
+ warnx("jvmti: cannot get declaring class");
+ return;
+ }
+
+ if (has_line_numbers && map && map_length) {
+ ret = get_line_numbers(jvmti, compile_info, &line_tab, &nr_lines);
+ if (ret != JVMTI_ERROR_NONE) {
+ warnx("jvmti: cannot get line table for method");
+ nr_lines = 0;
+ }
+ }
+
+ ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name);
+ if (ret != JVMTI_ERROR_NONE) {
+ warnx("jvmti: cannot get source filename ret=%d", ret);
+ goto error;
+ }
+
+ ret = (*jvmti)->GetClassSignature(jvmti, decl_class,
+ &class_sign, NULL);
+ if (ret != JVMTI_ERROR_NONE) {
+		warnx("jvmti: GetClassSignature failed");
+ goto error;
+ }
+
+ ret = (*jvmti)->GetMethodName(jvmti, method, &func_name,
+ &func_sign, NULL);
+ if (ret != JVMTI_ERROR_NONE) {
+ warnx("jvmti: failed getmethodname");
+ goto error;
+ }
+
+ /*
+	 * Assume the path name mirrors the class hierarchy; this is common practice with Java programs
+ */
+ if (*class_sign == 'L') {
+ int j, i = 0;
+ char *p = strrchr(class_sign, '/');
+ if (p) {
+ /* drop the 'L' prefix and copy up to the final '/' */
+ for (i = 0; i < (p - class_sign); i++)
+ fn[i] = class_sign[i+1];
+ }
+ /*
+	 * append the file name; use loops rather than string ops to avoid
+	 * modifying class_sign, which is used later for the symbol name
+ */
+ for (j = 0; i < (PATH_MAX - 1) && file_name && j < strlen(file_name); j++, i++)
+ fn[i] = file_name[j];
+ fn[i] = '\0';
+ } else {
+ /* fallback case */
+ strcpy(fn, file_name);
+ }
+ /*
+ * write source line info record if we have it
+ */
+ if (jvmti_write_debug_info(jvmti_agent, addr, fn, line_tab, nr_lines))
+ warnx("jvmti: write_debug_info() failed");
+
+ len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2;
+ {
+ char str[len];
+ snprintf(str, len, "%s%s%s", class_sign, func_name, func_sign);
+
+ if (jvmti_write_code(jvmti_agent, str, addr, code_addr, code_size))
+ warnx("jvmti: write_code() failed");
+ }
+error:
+ (*jvmti)->Deallocate(jvmti, (unsigned char *)func_name);
+ (*jvmti)->Deallocate(jvmti, (unsigned char *)func_sign);
+ (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
+ (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
+ free(line_tab);
+}
+
+static void JNICALL
+code_generated_cb(jvmtiEnv *jvmti,
+ char const *name,
+ void const *code_addr,
+ jint code_size)
+{
+	uint64_t addr = (uint64_t)(uintptr_t)code_addr;
+ int ret;
+
+ ret = jvmti_write_code(jvmti_agent, name, addr, code_addr, code_size);
+ if (ret)
+ warnx("jvmti: write_code() failed for code_generated");
+}
+
+JNIEXPORT jint JNICALL
+Agent_OnLoad(JavaVM *jvm, char *options, void *reserved __unused)
+{
+ jvmtiEventCallbacks cb;
+ jvmtiCapabilities caps1;
+ jvmtiJlocationFormat format;
+ jvmtiEnv *jvmti = NULL;
+ jint ret;
+
+ jvmti_agent = jvmti_open();
+ if (!jvmti_agent) {
+ warnx("jvmti: open_agent failed");
+ return -1;
+ }
+
+ /*
+ * Request a JVMTI interface version 1 environment
+ */
+ ret = (*jvm)->GetEnv(jvm, (void *)&jvmti, JVMTI_VERSION_1);
+ if (ret != JNI_OK) {
+ warnx("jvmti: jvmti version 1 not supported");
+ return -1;
+ }
+
+ /*
+	 * acquire the method_load capability; we require it.
+	 * line numbers and the source file name are requested later (optional)
+ */
+ memset(&caps1, 0, sizeof(caps1));
+ caps1.can_generate_compiled_method_load_events = 1;
+
+ ret = (*jvmti)->AddCapabilities(jvmti, &caps1);
+ if (ret != JVMTI_ERROR_NONE) {
+ warnx("jvmti: acquire compiled_method capability failed");
+ return -1;
+ }
+ ret = (*jvmti)->GetJLocationFormat(jvmti, &format);
+ if (ret == JVMTI_ERROR_NONE && format == JVMTI_JLOCATION_JVMBCI) {
+ memset(&caps1, 0, sizeof(caps1));
+ caps1.can_get_line_numbers = 1;
+ caps1.can_get_source_file_name = 1;
+ ret = (*jvmti)->AddCapabilities(jvmti, &caps1);
+ if (ret == JVMTI_ERROR_NONE)
+ has_line_numbers = 1;
+ }
+
+ memset(&cb, 0, sizeof(cb));
+
+ cb.CompiledMethodLoad = compiled_method_load_cb;
+ cb.DynamicCodeGenerated = code_generated_cb;
+
+ ret = (*jvmti)->SetEventCallbacks(jvmti, &cb, sizeof(cb));
+ if (ret != JVMTI_ERROR_NONE) {
+ warnx("jvmti: cannot set event callbacks");
+ return -1;
+ }
+
+ ret = (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE,
+ JVMTI_EVENT_COMPILED_METHOD_LOAD, NULL);
+ if (ret != JVMTI_ERROR_NONE) {
+ warnx("jvmti: setnotification failed for method_load");
+ return -1;
+ }
+
+ ret = (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE,
+ JVMTI_EVENT_DYNAMIC_CODE_GENERATED, NULL);
+ if (ret != JVMTI_ERROR_NONE) {
+ warnx("jvmti: setnotification failed on code_generated");
+ return -1;
+ }
+ return 0;
+}
+
+JNIEXPORT void JNICALL
+Agent_OnUnload(JavaVM *jvm __unused)
+{
+ int ret;
+
+ ret = jvmti_close(jvmti_agent);
+ if (ret)
+ errx(1, "Error: op_close_agent()");
+}
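A note on the line mapping in do_get_line_numbers() above: the loop records every jvmtiLineNumberEntry whose start_location lies below the bci, leaving it to later consumers to pick the best match. For contrast, a minimal standalone sketch of the usual "closest entry" mapping; bci_to_line is a hypothetical helper, not part of this patch, and it assumes tab[] is sorted by start_location (what HotSpot returns in practice):

    #include <jvmti.h>

    /* Keep the last entry that does not start past the bci. */
    static jint bci_to_line(jvmtiLineNumberEntry *tab, jint nr, jlocation bci)
    {
    	jint line = -1;	/* -1: bci precedes the first entry */
    	jint i;

    	for (i = 0; i < nr; i++) {
    		if (tab[i].start_location > bci)
    			break;
    		line = tab[i].line_number;
    	}
    	return line;
    }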
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index a929618b8eb6..aaee0a782747 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -454,11 +454,12 @@ static void handle_internal_command(int argc, const char **argv)
static void execv_dashed_external(const char **argv)
{
- struct strbuf cmd = STRBUF_INIT;
+ char *cmd;
const char *tmp;
int status;
- strbuf_addf(&cmd, "perf-%s", argv[0]);
+ if (asprintf(&cmd, "perf-%s", argv[0]) < 0)
+ goto do_die;
/*
* argv[0] must be the perf command, but the argv array
@@ -467,7 +468,7 @@ static void execv_dashed_external(const char **argv)
* restore it on error.
*/
tmp = argv[0];
- argv[0] = cmd.buf;
+ argv[0] = cmd;
/*
* if we fail because the command is not found, it is
@@ -475,15 +476,16 @@ static void execv_dashed_external(const char **argv)
*/
status = run_command_v_opt(argv, 0);
if (status != -ERR_RUN_COMMAND_EXEC) {
- if (IS_RUN_COMMAND_ERR(status))
+ if (IS_RUN_COMMAND_ERR(status)) {
+do_die:
die("unable to run '%s'", argv[0]);
+ }
exit(-status);
}
errno = ENOENT; /* as if we called execvp */
argv[0] = tmp;
-
- strbuf_release(&cmd);
+ zfree(&cmd);
}
static int run_argv(int *argcp, const char ***argv)
@@ -546,6 +548,8 @@ int main(int argc, const char **argv)
srandom(time(NULL));
+ perf_config(perf_default_config, NULL);
+
/* get debugfs/tracefs mount point from /proc/mounts */
tracing_path_mount();
@@ -613,6 +617,8 @@ int main(int argc, const char **argv)
*/
pthread__block_sigwinch();
+ perf_debug_setup();
+
while (1) {
static int done_help;
int was_alias = run_argv(&argc, &argv);
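Two details in the execv_dashed_external() change above are easy to miss: asprintf() leaves the output pointer undefined when it fails, which is why the error path jumps straight to die() without touching it, and zfree() both frees and NULLs the pointer afterwards. A minimal sketch of the allocation pattern in isolation; make_cmd is a hypothetical name, not part of the patch:

    #define _GNU_SOURCE	/* for asprintf() */
    #include <stdio.h>
    #include <stdlib.h>

    static char *make_cmd(const char *sub)
    {
    	char *cmd;

    	/* on failure asprintf() returns -1 and leaves 'cmd'
    	 * undefined, so it must be neither used nor freed */
    	if (asprintf(&cmd, "perf-%s", sub) < 0)
    		return NULL;

    	return cmd;	/* caller frees */
    }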
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 90129accffbe..5381a01c0610 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -58,6 +58,8 @@ struct record_opts {
bool full_auxtrace;
bool auxtrace_snapshot_mode;
bool record_switch_events;
+ bool all_kernel;
+ bool all_user;
unsigned int freq;
unsigned int mmap_pages;
unsigned int auxtrace_mmap_pages;
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
index 15c8400240fd..1d95009592eb 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
@@ -71,7 +71,10 @@ try:
except:
if not audit_package_warned:
audit_package_warned = True
- print "Install the audit-libs-python package to get syscall names"
+ print "Install the audit-libs-python package to get syscall names.\n" \
+ "For example:\n # apt-get install python-audit (Ubuntu)" \
+ "\n # yum install audit-libs-python (Fedora)" \
+ "\n etc.\n"
def syscall_name(id):
try:
diff --git a/tools/perf/tests/.gitignore b/tools/perf/tests/.gitignore
index bf016c439fbd..8cc30e731c73 100644
--- a/tools/perf/tests/.gitignore
+++ b/tools/perf/tests/.gitignore
@@ -1,3 +1,4 @@
llvm-src-base.c
llvm-src-kbuild.c
llvm-src-prologue.c
+llvm-src-relocation.c
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index 614899b88b37..1ba628ed049a 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -31,7 +31,7 @@ perf-y += sample-parsing.o
perf-y += parse-no-sample-id-all.o
perf-y += kmod-path.o
perf-y += thread-map.o
-perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o llvm-src-prologue.o
+perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o llvm-src-prologue.o llvm-src-relocation.o
perf-y += bpf.o
perf-y += topology.o
perf-y += cpumap.o
@@ -59,6 +59,13 @@ $(OUTPUT)tests/llvm-src-prologue.c: tests/bpf-script-test-prologue.c tests/Build
$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
$(Q)echo ';' >> $@
+$(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/Build
+ $(call rule_mkdir)
+ $(Q)echo '#include <tests/llvm.h>' > $@
+ $(Q)echo 'const char test_llvm__bpf_test_relocation[] =' >> $@
+ $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
+ $(Q)echo ';' >> $@
+
ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64))
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
endif
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index fb80c9eb6a95..e7664fe3bd33 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -29,14 +29,59 @@
static int fd1;
static int fd2;
+static int fd3;
static int overflows;
+static int overflows_2;
+
+volatile long the_var;
+
+
+/*
+ * Use asm to ensure that the watchpoint and the breakpoint can be
+ * triggered by one and the same instruction.
+ */
+#if defined (__x86_64__)
+extern void __test_function(volatile long *ptr);
+asm (
+ ".globl __test_function\n"
+ "__test_function:\n"
+ "incq (%rdi)\n"
+ "ret\n");
+#elif defined (__aarch64__)
+extern void __test_function(volatile long *ptr);
+asm (
+ ".globl __test_function\n"
+ "__test_function:\n"
+ "str x30, [x0]\n"
+ "ret\n");
+
+#else
+static void __test_function(volatile long *ptr)
+{
+ *ptr = 0x1234;
+}
+#endif
__attribute__ ((noinline))
static int test_function(void)
{
+ __test_function(&the_var);
+ the_var++;
return time(NULL);
}
+static void sig_handler_2(int signum __maybe_unused,
+ siginfo_t *oh __maybe_unused,
+ void *uc __maybe_unused)
+{
+ overflows_2++;
+ if (overflows_2 > 10) {
+ ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0);
+ ioctl(fd2, PERF_EVENT_IOC_DISABLE, 0);
+ ioctl(fd3, PERF_EVENT_IOC_DISABLE, 0);
+ }
+}
+
static void sig_handler(int signum __maybe_unused,
siginfo_t *oh __maybe_unused,
void *uc __maybe_unused)
@@ -54,10 +99,11 @@ static void sig_handler(int signum __maybe_unused,
*/
ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0);
ioctl(fd2, PERF_EVENT_IOC_DISABLE, 0);
+ ioctl(fd3, PERF_EVENT_IOC_DISABLE, 0);
}
}
-static int bp_event(void *fn, int setup_signal)
+static int __event(bool is_x, void *addr, int sig)
{
struct perf_event_attr pe;
int fd;
@@ -67,8 +113,8 @@ static int bp_event(void *fn, int setup_signal)
pe.size = sizeof(struct perf_event_attr);
pe.config = 0;
- pe.bp_type = HW_BREAKPOINT_X;
- pe.bp_addr = (unsigned long) fn;
+ pe.bp_type = is_x ? HW_BREAKPOINT_X : HW_BREAKPOINT_W;
+ pe.bp_addr = (unsigned long) addr;
pe.bp_len = sizeof(long);
pe.sample_period = 1;
@@ -86,17 +132,25 @@ static int bp_event(void *fn, int setup_signal)
return TEST_FAIL;
}
- if (setup_signal) {
- fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC);
- fcntl(fd, F_SETSIG, SIGIO);
- fcntl(fd, F_SETOWN, getpid());
- }
+ fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC);
+ fcntl(fd, F_SETSIG, sig);
+ fcntl(fd, F_SETOWN, getpid());
ioctl(fd, PERF_EVENT_IOC_RESET, 0);
return fd;
}
+static int bp_event(void *addr, int sig)
+{
+ return __event(true, addr, sig);
+}
+
+static int wp_event(void *addr, int sig)
+{
+ return __event(false, addr, sig);
+}
+
static long long bp_count(int fd)
{
long long count;
@@ -114,7 +168,7 @@ static long long bp_count(int fd)
int test__bp_signal(int subtest __maybe_unused)
{
struct sigaction sa;
- long long count1, count2;
+ long long count1, count2, count3;
/* setup SIGIO signal handler */
memset(&sa, 0, sizeof(struct sigaction));
@@ -126,21 +180,52 @@ int test__bp_signal(int subtest __maybe_unused)
return TEST_FAIL;
}
+ sa.sa_sigaction = (void *) sig_handler_2;
+ if (sigaction(SIGUSR1, &sa, NULL) < 0) {
+ pr_debug("failed setting up signal handler 2\n");
+ return TEST_FAIL;
+ }
+
/*
	 * We create the following events:
*
- * fd1 - breakpoint event on test_function with SIGIO
+ * fd1 - breakpoint event on __test_function with SIGIO
* signal configured. We should get signal
* notification each time the breakpoint is hit
*
- * fd2 - breakpoint event on sig_handler without SIGIO
+ * fd2 - breakpoint event on sig_handler with SIGUSR1
+ * configured. We should get SIGUSR1 each time when
+ * breakpoint is hit
+ *
+ * fd3 - watchpoint event on __test_function with SIGIO
* configured.
*
	 * The following processing should happen:
- * - execute test_function
- * - fd1 event breakpoint hit -> count1 == 1
- * - SIGIO is delivered -> overflows == 1
- * - fd2 event breakpoint hit -> count2 == 1
+ * Exec: Action: Result:
+ * incq (%rdi) - fd1 event breakpoint hit -> count1 == 1
+ * - SIGIO is delivered
+ * sig_handler - fd2 event breakpoint hit -> count2 == 1
+ * - SIGUSR1 is delivered
+ * sig_handler_2 -> overflows_2 == 1 (nested signal)
+ * sys_rt_sigreturn - return from sig_handler_2
+ * overflows++ -> overflows = 1
+ * sys_rt_sigreturn - return from sig_handler
+ * incq (%rdi) - fd3 event watchpoint hit -> count3 == 1 (wp and bp in one insn)
+ * - SIGIO is delivered
+ * sig_handler - fd2 event breakpoint hit -> count2 == 2
+ * - SIGUSR1 is delivered
+ * sig_handler_2 -> overflows_2 == 2 (nested signal)
+ * sys_rt_sigreturn - return from sig_handler_2
+ * overflows++ -> overflows = 2
+ * sys_rt_sigreturn - return from sig_handler
+ * the_var++ - fd3 event watchpoint hit -> count3 == 2 (standalone watchpoint)
+ * - SIGIO is delivered
+ * sig_handler - fd2 event breakpoint hit -> count2 == 3
+ * - SIGUSR1 is delivered
+ * sig_handler_2 -> overflows_2 == 3 (nested signal)
+ * sys_rt_sigreturn - return from sig_handler_2
+ * overflows++ -> overflows == 3
+ * sys_rt_sigreturn - return from sig_handler
*
	 * The test case checks the following error conditions:
* - we get stuck in signal handler because of debug
@@ -152,11 +237,13 @@ int test__bp_signal(int subtest __maybe_unused)
*
*/
- fd1 = bp_event(test_function, 1);
- fd2 = bp_event(sig_handler, 0);
+ fd1 = bp_event(__test_function, SIGIO);
+ fd2 = bp_event(sig_handler, SIGUSR1);
+ fd3 = wp_event((void *)&the_var, SIGIO);
ioctl(fd1, PERF_EVENT_IOC_ENABLE, 0);
ioctl(fd2, PERF_EVENT_IOC_ENABLE, 0);
+ ioctl(fd3, PERF_EVENT_IOC_ENABLE, 0);
/*
	 * Kick off the test by triggering 'fd1'
@@ -166,15 +253,18 @@ int test__bp_signal(int subtest __maybe_unused)
ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0);
ioctl(fd2, PERF_EVENT_IOC_DISABLE, 0);
+ ioctl(fd3, PERF_EVENT_IOC_DISABLE, 0);
count1 = bp_count(fd1);
count2 = bp_count(fd2);
+ count3 = bp_count(fd3);
close(fd1);
close(fd2);
+ close(fd3);
- pr_debug("count1 %lld, count2 %lld, overflow %d\n",
- count1, count2, overflows);
+ pr_debug("count1 %lld, count2 %lld, count3 %lld, overflow %d, overflows_2 %d\n",
+ count1, count2, count3, overflows, overflows_2);
if (count1 != 1) {
if (count1 == 11)
@@ -183,12 +273,18 @@ int test__bp_signal(int subtest __maybe_unused)
pr_debug("failed: wrong count for bp1%lld\n", count1);
}
- if (overflows != 1)
+ if (overflows != 3)
pr_debug("failed: wrong overflow hit\n");
- if (count2 != 1)
+ if (overflows_2 != 3)
+ pr_debug("failed: wrong overflow_2 hit\n");
+
+ if (count2 != 3)
pr_debug("failed: wrong count for bp2\n");
- return count1 == 1 && overflows == 1 && count2 == 1 ?
+ if (count3 != 2)
+ pr_debug("failed: wrong count for bp3\n");
+
+ return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ?
TEST_OK : TEST_FAIL;
}
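The fcntl() sequence that __event() now applies unconditionally is what routes an event's overflow notification to a chosen signal. A minimal sketch of just that step, assuming fd came from a successful perf_event_open() call; route_overflow is a hypothetical helper, not part of the patch:

    #define _GNU_SOURCE	/* for F_SETSIG */
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    static void route_overflow(int fd, int sig)
    {
    	/* deliver overflow notifications asynchronously... */
    	fcntl(fd, F_SETFL, O_RDWR | O_NONBLOCK | O_ASYNC);
    	/* ...as signal 'sig' rather than the default SIGIO... */
    	fcntl(fd, F_SETSIG, sig);
    	/* ...to this process */
    	fcntl(fd, F_SETOWN, getpid());

    	/* start counting from zero, as __event() does */
    	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
    }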
diff --git a/tools/perf/tests/bpf-script-test-relocation.c b/tools/perf/tests/bpf-script-test-relocation.c
new file mode 100644
index 000000000000..93af77421816
--- /dev/null
+++ b/tools/perf/tests/bpf-script-test-relocation.c
@@ -0,0 +1,50 @@
+/*
+ * bpf-script-test-relocation.c
+ * Test BPF loader checking relocation
+ */
+#ifndef LINUX_VERSION_CODE
+# error Need LINUX_VERSION_CODE
+# error Example: for a 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200"' into the llvm section of ~/.perfconfig
+#endif
+#define BPF_ANY 0
+#define BPF_MAP_TYPE_ARRAY 2
+#define BPF_FUNC_map_lookup_elem 1
+#define BPF_FUNC_map_update_elem 2
+
+static void *(*bpf_map_lookup_elem)(void *map, void *key) =
+ (void *) BPF_FUNC_map_lookup_elem;
+static void *(*bpf_map_update_elem)(void *map, void *key, void *value, int flags) =
+ (void *) BPF_FUNC_map_update_elem;
+
+struct bpf_map_def {
+ unsigned int type;
+ unsigned int key_size;
+ unsigned int value_size;
+ unsigned int max_entries;
+};
+
+#define SEC(NAME) __attribute__((section(NAME), used))
+struct bpf_map_def SEC("maps") my_table = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .max_entries = 1,
+};
+
+int this_is_a_global_val;
+
+SEC("func=sys_write")
+int bpf_func__sys_write(void *ctx)
+{
+ int key = 0;
+ int value = 0;
+
+ /*
+	 * Incorrect relocation: the loader must not allow this
+	 * program to be loaded into the kernel.
+ */
+ bpf_map_update_elem(&this_is_a_global_val, &key, &value, 0);
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = LINUX_VERSION_CODE;
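For contrast with the intentionally bad call above: a valid update relocates against the map definition itself, so the loader can resolve it. A sketch reusing the declarations from this test file; bpf_func__sys_write_ok is a hypothetical name showing what a well-formed program would look like, and it is not part of the test:

    SEC("func=sys_write")
    int bpf_func__sys_write_ok(void *ctx)
    {
    	int key = 0;
    	int value = 1;

    	/* correct: the first argument is a real map, so the
    	 * relocation against 'my_table' can be resolved */
    	bpf_map_update_elem(&my_table, &key, &value, BPF_ANY);
    	return 0;
    }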
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index 33689a0cf821..199501c71e27 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -1,7 +1,11 @@
#include <stdio.h>
#include <sys/epoll.h>
+#include <util/util.h>
#include <util/bpf-loader.h>
#include <util/evlist.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <bpf/bpf.h>
#include "tests.h"
#include "llvm.h"
#include "debug.h"
@@ -71,6 +75,15 @@ static struct {
(NR_ITERS + 1) / 4,
},
#endif
+ {
+ LLVM_TESTCASE_BPF_RELOCATION,
+ "Test BPF relocation checker",
+ "[bpf_relocation_test]",
+ "fix 'perf test LLVM' first",
+ "libbpf error when dealing with relocation",
+ NULL,
+ 0,
+ },
};
static int do_test(struct bpf_object *obj, int (*func)(void),
@@ -99,7 +112,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
parse_evlist.error = &parse_error;
INIT_LIST_HEAD(&parse_evlist.list);
- err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj);
+ err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj, NULL);
if (err || list_empty(&parse_evlist.list)) {
pr_debug("Failed to add events selected by BPF\n");
return TEST_FAIL;
@@ -190,7 +203,7 @@ static int __test__bpf(int idx)
ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
bpf_testcase_table[idx].prog_id,
- true);
+ true, NULL);
if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
pr_debug("Unable to get BPF object, %s\n",
bpf_testcase_table[idx].msg_compile_fail);
@@ -202,14 +215,21 @@ static int __test__bpf(int idx)
obj = prepare_bpf(obj_buf, obj_buf_sz,
bpf_testcase_table[idx].name);
- if (!obj) {
+ if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
+ if (!obj)
+ pr_debug("Fail to load BPF object: %s\n",
+ bpf_testcase_table[idx].msg_load_fail);
+ else
+ pr_debug("Success unexpectedly: %s\n",
+ bpf_testcase_table[idx].msg_load_fail);
ret = TEST_FAIL;
goto out;
}
- ret = do_test(obj,
- bpf_testcase_table[idx].target_func,
- bpf_testcase_table[idx].expect_result);
+ if (obj)
+ ret = do_test(obj,
+ bpf_testcase_table[idx].target_func,
+ bpf_testcase_table[idx].expect_result);
out:
bpf__clear();
return ret;
@@ -227,6 +247,36 @@ const char *test__bpf_subtest_get_desc(int i)
return bpf_testcase_table[i].desc;
}
+static int check_env(void)
+{
+ int err;
+ unsigned int kver_int;
+ char license[] = "GPL";
+
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ };
+
+ err = fetch_kernel_version(&kver_int, NULL, 0);
+ if (err) {
+ pr_debug("Unable to get kernel version\n");
+ return err;
+ }
+
+ err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
+ sizeof(insns) / sizeof(insns[0]),
+ license, kver_int, NULL, 0);
+ if (err < 0) {
+ pr_err("Missing basic BPF support, skip this test: %s\n",
+ strerror(errno));
+ return err;
+ }
+ close(err);
+
+ return 0;
+}
+
int test__bpf(int i)
{
int err;
@@ -239,6 +289,9 @@ int test__bpf(int i)
return TEST_SKIP;
}
+ if (check_env())
+ return TEST_SKIP;
+
err = __test__bpf(i);
return err;
}
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 313a48c6b2bc..afc9ad0a0515 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -439,7 +439,7 @@ static int do_test_code_reading(bool try_kcore)
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
- .freq = 4000,
+ .freq = 500,
.target = {
.uses_mmap = true,
},
@@ -559,7 +559,13 @@ static int do_test_code_reading(bool try_kcore)
evlist = NULL;
continue;
}
- pr_debug("perf_evlist__open failed\n");
+
+ if (verbose) {
+ char errbuf[512];
+ perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
+ pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
+ }
+
goto out_put;
}
break;
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 5e6a86e50fb9..ecf136c385d5 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -191,7 +191,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
* function since TEST_ASSERT_VAL() returns in case of failure.
*/
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists, NULL);
+ perf_evsel__output_resort(hists_to_evsel(hists), NULL);
if (verbose > 2) {
pr_info("use callchain: %d, cumulate callchain: %d\n",
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 351a42463444..34b945a55d4d 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -145,7 +145,7 @@ int test__hists_filter(int subtest __maybe_unused)
struct hists *hists = evsel__hists(evsel);
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists, NULL);
+ perf_evsel__output_resort(evsel, NULL);
if (verbose > 2) {
pr_info("Normal histogram\n");
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index b231265148d8..23cce67c7e48 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -156,7 +156,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists, NULL);
+ perf_evsel__output_resort(evsel, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -256,7 +256,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists, NULL);
+ perf_evsel__output_resort(evsel, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -310,7 +310,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists, NULL);
+ perf_evsel__output_resort(evsel, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -388,7 +388,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists, NULL);
+ perf_evsel__output_resort(evsel, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -491,7 +491,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists, NULL);
+ perf_evsel__output_resort(evsel, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c
index 06f45c1d4256..cff564fb4b66 100644
--- a/tools/perf/tests/llvm.c
+++ b/tools/perf/tests/llvm.c
@@ -6,12 +6,6 @@
#include "tests.h"
#include "debug.h"
-static int perf_config_cb(const char *var, const char *val,
- void *arg __maybe_unused)
-{
- return perf_default_config(var, val, arg);
-}
-
#ifdef HAVE_LIBBPF_SUPPORT
static int test__bpf_parsing(void *obj_buf, size_t obj_buf_sz)
{
@@ -35,6 +29,7 @@ static int test__bpf_parsing(void *obj_buf __maybe_unused,
static struct {
const char *source;
const char *desc;
+ bool should_load_fail;
} bpf_source_table[__LLVM_TESTCASE_MAX] = {
[LLVM_TESTCASE_BASE] = {
.source = test_llvm__bpf_base_prog,
@@ -48,14 +43,19 @@ static struct {
.source = test_llvm__bpf_test_prologue_prog,
.desc = "Compile source for BPF prologue generation test",
},
+ [LLVM_TESTCASE_BPF_RELOCATION] = {
+ .source = test_llvm__bpf_test_relocation,
+ .desc = "Compile source for BPF relocation test",
+ .should_load_fail = true,
+ },
};
-
int
test_llvm__fetch_bpf_obj(void **p_obj_buf,
size_t *p_obj_buf_sz,
enum test_llvm__testcase idx,
- bool force)
+ bool force,
+ bool *should_load_fail)
{
const char *source;
const char *desc;
@@ -68,8 +68,8 @@ test_llvm__fetch_bpf_obj(void **p_obj_buf,
source = bpf_source_table[idx].source;
desc = bpf_source_table[idx].desc;
-
- perf_config(perf_config_cb, NULL);
+ if (should_load_fail)
+ *should_load_fail = bpf_source_table[idx].should_load_fail;
/*
* Skip this test if user's .perfconfig doesn't set [llvm] section
@@ -136,14 +136,15 @@ int test__llvm(int subtest)
int ret;
void *obj_buf = NULL;
size_t obj_buf_sz = 0;
+ bool should_load_fail = false;
if ((subtest < 0) || (subtest >= __LLVM_TESTCASE_MAX))
return TEST_FAIL;
ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
- subtest, false);
+ subtest, false, &should_load_fail);
- if (ret == TEST_OK) {
+ if (ret == TEST_OK && !should_load_fail) {
ret = test__bpf_parsing(obj_buf, obj_buf_sz);
if (ret != TEST_OK) {
pr_debug("Failed to parse test case '%s'\n",
diff --git a/tools/perf/tests/llvm.h b/tools/perf/tests/llvm.h
index 5150b4d6ef50..0eaa604be99d 100644
--- a/tools/perf/tests/llvm.h
+++ b/tools/perf/tests/llvm.h
@@ -7,14 +7,17 @@
extern const char test_llvm__bpf_base_prog[];
extern const char test_llvm__bpf_test_kbuild_prog[];
extern const char test_llvm__bpf_test_prologue_prog[];
+extern const char test_llvm__bpf_test_relocation[];
enum test_llvm__testcase {
LLVM_TESTCASE_BASE,
LLVM_TESTCASE_KBUILD,
LLVM_TESTCASE_BPF_PROLOGUE,
+ LLVM_TESTCASE_BPF_RELOCATION,
__LLVM_TESTCASE_MAX,
};
int test_llvm__fetch_bpf_obj(void **p_obj_buf, size_t *p_obj_buf_sz,
- enum test_llvm__testcase index, bool force);
+ enum test_llvm__testcase index, bool force,
+ bool *should_load_fail);
#endif
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index f918015512af..cac15d93aea6 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -15,6 +15,7 @@ else
PERF := .
PERF_O := $(PERF)
O_OPT :=
+FULL_O := $(shell readlink -f $(PERF_O) || echo $(PERF_O))
ifneq ($(O),)
FULL_O := $(shell readlink -f $(O) || echo $(O))
@@ -79,6 +80,7 @@ make_no_libaudit := NO_LIBAUDIT=1
make_no_libbionic := NO_LIBBIONIC=1
make_no_auxtrace := NO_AUXTRACE=1
make_no_libbpf := NO_LIBBPF=1
+make_no_libcrypto := NO_LIBCRYPTO=1
make_tags := tags
make_cscope := cscope
make_help := help
@@ -102,6 +104,7 @@ make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
make_minimal += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1
+make_minimal += NO_LIBCRYPTO=1
# $(run) contains all available tests
run := make_pure
@@ -110,6 +113,9 @@ run := make_pure
# disable features detection
ifeq ($(MK),Makefile)
run += make_clean_all
+MAKE_F := $(MAKE)
+else
+MAKE_F := $(MAKE) -f $(MK)
endif
run += make_python_perf_so
run += make_debug
@@ -260,6 +266,8 @@ run := $(shell shuf -e $(run))
run_O := $(shell shuf -e $(run_O))
endif
+max_width := $(shell echo $(run_O) | sed 's/ /\n/g' | wc -L)
+
ifdef DEBUG
d := $(info run $(run))
d := $(info run_O $(run_O))
@@ -267,13 +275,13 @@ endif
MAKEFLAGS := --no-print-directory
-clean := @(cd $(PERF); make -s -f $(MK) $(O_OPT) clean >/dev/null)
+clean := @(cd $(PERF); $(MAKE_F) -s $(O_OPT) clean >/dev/null)
$(run):
$(call clean)
@TMP_DEST=$$(mktemp -d); \
- cmd="cd $(PERF) && make -f $(MK) $(PARALLEL_OPT) $(O_OPT) DESTDIR=$$TMP_DEST $($@)"; \
- echo "- $@: $$cmd" && echo $$cmd > $@ && \
+ cmd="cd $(PERF) && $(MAKE_F) $($@) $(PARALLEL_OPT) $(O_OPT) DESTDIR=$$TMP_DEST"; \
+ printf "%*.*s: %s\n" $(max_width) $(max_width) "$@" "$$cmd" && echo $$cmd > $@ && \
( eval $$cmd ) >> $@ 2>&1; \
echo " test: $(call test,$@)" >> $@ 2>&1; \
$(call test,$@) && \
@@ -283,8 +291,8 @@ $(run_O):
$(call clean)
@TMP_O=$$(mktemp -d); \
TMP_DEST=$$(mktemp -d); \
- cmd="cd $(PERF) && make -f $(MK) $(PARALLEL_OPT) O=$$TMP_O DESTDIR=$$TMP_DEST $($(patsubst %_O,%,$@))"; \
- echo "- $@: $$cmd" && echo $$cmd > $@ && \
+ cmd="cd $(PERF) && $(MAKE_F) $($(patsubst %_O,%,$@)) $(PARALLEL_OPT) O=$$TMP_O DESTDIR=$$TMP_DEST"; \
+ printf "%*.*s: %s\n" $(max_width) $(max_width) "$@" "$$cmd" && echo $$cmd > $@ && \
( eval $$cmd ) >> $@ 2>&1 && \
echo " test: $(call test_O,$@)" >> $@ 2>&1; \
$(call test_O,$@) && \
@@ -313,11 +321,43 @@ make_kernelsrc_tools:
(make -C ../../tools $(PARALLEL_OPT) $(K_O_OPT) perf) > $@ 2>&1 && \
test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false)
+FEATURES_DUMP_FILE := $(FULL_O)/BUILD_TEST_FEATURE_DUMP
+FEATURES_DUMP_FILE_STATIC := $(FULL_O)/BUILD_TEST_FEATURE_DUMP_STATIC
+
all: $(run) $(run_O) tarpkg make_kernelsrc make_kernelsrc_tools
@echo OK
+ @rm -f $(FEATURES_DUMP_FILE) $(FEATURES_DUMP_FILE_STATIC)
out: $(run_O)
@echo OK
+ @rm -f $(FEATURES_DUMP_FILE) $(FEATURES_DUMP_FILE_STATIC)
+
+ifeq ($(REUSE_FEATURES_DUMP),1)
+$(FEATURES_DUMP_FILE):
+ $(call clean)
+ @cmd="cd $(PERF) && make FEATURE_DUMP_COPY=$@ $(O_OPT) feature-dump"; \
+ echo "- $@: $$cmd" && echo $$cmd && \
+ ( eval $$cmd ) > /dev/null 2>&1
+
+$(FEATURES_DUMP_FILE_STATIC):
+ $(call clean)
+ @cmd="cd $(PERF) && make FEATURE_DUMP_COPY=$@ $(O_OPT) LDFLAGS='-static' feature-dump"; \
+ echo "- $@: $$cmd" && echo $$cmd && \
+ ( eval $$cmd ) > /dev/null 2>&1
+
+# Add feature dump dependency for run/run_O targets
+$(foreach t,$(run) $(run_O),$(eval \
+ $(t): $(if $(findstring make_static,$(t)),\
+ $(FEATURES_DUMP_FILE_STATIC),\
+ $(FEATURES_DUMP_FILE))))
+
+# Append 'FEATURES_DUMP=' option to all test cases. For example:
+# make_no_libbpf: NO_LIBBPF=1 --> NO_LIBBPF=1 FEATURES_DUMP=/a/b/BUILD_TEST_FEATURE_DUMP
+# make_static: LDFLAGS=-static --> LDFLAGS=-static FEATURES_DUMP=/a/b/BUILD_TEST_FEATURE_DUMP_STATIC
+$(foreach t,$(run),$(if $(findstring make_static,$(t)),\
+ $(eval $(t) := $($(t)) FEATURES_DUMP=$(FEATURES_DUMP_FILE_STATIC)),\
+ $(eval $(t) := $($(t)) FEATURES_DUMP=$(FEATURES_DUMP_FILE))))
+endif
.PHONY: all $(run) $(run_O) tarpkg clean make_kernelsrc make_kernelsrc_tools
endif # ifndef MK
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index abe8849d1d70..7865f68dc0d8 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1271,6 +1271,38 @@ static int test__checkevent_precise_max_modifier(struct perf_evlist *evlist)
return 0;
}
+static int test__checkevent_config_symbol(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "insn") == 0);
+ return 0;
+}
+
+static int test__checkevent_config_raw(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "rawpmu") == 0);
+ return 0;
+}
+
+static int test__checkevent_config_num(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "numpmu") == 0);
+ return 0;
+}
+
+static int test__checkevent_config_cache(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "cachepmu") == 0);
+ return 0;
+}
+
static int count_tracepoints(void)
{
struct dirent *events_ent;
@@ -1579,6 +1611,26 @@ static struct evlist_test test__events[] = {
.check = test__checkevent_precise_max_modifier,
.id = 47,
},
+ {
+ .name = "instructions/name=insn/",
+ .check = test__checkevent_config_symbol,
+ .id = 48,
+ },
+ {
+ .name = "r1234/name=rawpmu/",
+ .check = test__checkevent_config_raw,
+ .id = 49,
+ },
+ {
+ .name = "4:0x6530160/name=numpmu/",
+ .check = test__checkevent_config_num,
+ .id = 50,
+ },
+ {
+ .name = "L1-dcache-misses/name=cachepmu/",
+ .check = test__checkevent_config_cache,
+ .id = 51,
+ },
};
static struct evlist_test test__events_pmu[] = {
@@ -1666,7 +1718,7 @@ static int test_term(struct terms_test *t)
}
ret = t->check(&terms);
- parse_events__free_terms(&terms);
+ parse_events_terms__purge(&terms);
return ret;
}
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index f0bfc9e8fd9f..630b0b409b97 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -110,7 +110,6 @@ int test__vmlinux_matches_kallsyms(int subtest __maybe_unused)
*/
for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
struct symbol *pair, *first_pair;
- bool backwards = true;
sym = rb_entry(nd, struct symbol, rb_node);
@@ -151,27 +150,14 @@ next_pair:
continue;
} else {
- struct rb_node *nnd;
-detour:
- nnd = backwards ? rb_prev(&pair->rb_node) :
- rb_next(&pair->rb_node);
- if (nnd) {
- struct symbol *next = rb_entry(nnd, struct symbol, rb_node);
-
- if (UM(next->start) == mem_start) {
- pair = next;
+ pair = machine__find_kernel_symbol_by_name(&kallsyms, type, sym->name, NULL, NULL);
+ if (pair) {
+ if (UM(pair->start) == mem_start)
goto next_pair;
- }
- }
- if (backwards) {
- backwards = false;
- pair = first_pair;
- goto detour;
+ pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
+ mem_start, sym->name, pair->name);
}
-
- pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
- mem_start, sym->name, pair->name);
}
} else
pr_debug("%#" PRIx64 ": %s not on kallsyms\n",
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index d37202121689..af68a9d488bf 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -531,8 +531,8 @@ static struct ui_browser_colorset {
.bg = "yellow",
},
{
- .colorset = HE_COLORSET_CODE,
- .name = "code",
+ .colorset = HE_COLORSET_JUMP_ARROWS,
+ .name = "jump_arrows",
.fg = "blue",
.bg = "default",
},
diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
index 01781de59532..be3b70eb5fca 100644
--- a/tools/perf/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -7,7 +7,7 @@
#define HE_COLORSET_MEDIUM 51
#define HE_COLORSET_NORMAL 52
#define HE_COLORSET_SELECTED 53
-#define HE_COLORSET_CODE 54
+#define HE_COLORSET_JUMP_ARROWS 54
#define HE_COLORSET_ADDR 55
#define HE_COLORSET_ROOT 56
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 718bd46d47fa..4fc208e82c6f 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -284,7 +284,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
to = (u64)btarget->idx;
}
- ui_browser__set_color(browser, HE_COLORSET_CODE);
+ ui_browser__set_color(browser, HE_COLORSET_JUMP_ARROWS);
__ui_browser__line_arrow(browser, pcnt_width + 2 + ab->addr_width,
from, to);
}
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 08c09ad755d2..4b9816555946 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -32,6 +32,7 @@ struct hist_browser {
bool show_headers;
float min_pcnt;
u64 nr_non_filtered_entries;
+ u64 nr_hierarchy_entries;
u64 nr_callchain_rows;
};
@@ -58,11 +59,11 @@ static int hist_browser__get_folding(struct hist_browser *browser)
for (nd = rb_first(&hists->entries);
(nd = hists__filter_entries(nd, browser->min_pcnt)) != NULL;
- nd = rb_next(nd)) {
+ nd = rb_hierarchy_next(nd)) {
struct hist_entry *he =
rb_entry(nd, struct hist_entry, rb_node);
- if (he->unfolded)
+ if (he->leaf && he->unfolded)
unfolded_rows += he->nr_rows;
}
return unfolded_rows;
@@ -72,7 +73,9 @@ static u32 hist_browser__nr_entries(struct hist_browser *hb)
{
u32 nr_entries;
- if (hist_browser__has_filter(hb))
+ if (symbol_conf.report_hierarchy)
+ nr_entries = hb->nr_hierarchy_entries;
+ else if (hist_browser__has_filter(hb))
nr_entries = hb->nr_non_filtered_entries;
else
nr_entries = hb->hists->nr_entries;
@@ -247,6 +250,38 @@ static int callchain__count_rows(struct rb_root *chain)
return n;
}
+static int hierarchy_count_rows(struct hist_browser *hb, struct hist_entry *he,
+ bool include_children)
+{
+ int count = 0;
+ struct rb_node *node;
+ struct hist_entry *child;
+
+ if (he->leaf)
+ return callchain__count_rows(&he->sorted_chain);
+
+ if (he->has_no_entry)
+ return 1;
+
+ node = rb_first(&he->hroot_out);
+ while (node) {
+ float percent;
+
+ child = rb_entry(node, struct hist_entry, rb_node);
+ percent = hist_entry__get_percent_limit(child);
+
+ if (!child->filtered && percent >= hb->min_pcnt) {
+ count++;
+
+ if (include_children && child->unfolded)
+ count += hierarchy_count_rows(hb, child, true);
+ }
+
+ node = rb_next(node);
+ }
+ return count;
+}
+
static bool hist_entry__toggle_fold(struct hist_entry *he)
{
if (!he)
@@ -326,11 +361,17 @@ static void callchain__init_have_children(struct rb_root *root)
static void hist_entry__init_have_children(struct hist_entry *he)
{
- if (!he->init_have_children) {
+ if (he->init_have_children)
+ return;
+
+ if (he->leaf) {
he->has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
callchain__init_have_children(&he->sorted_chain);
- he->init_have_children = true;
+ } else {
+ he->has_children = !RB_EMPTY_ROOT(&he->hroot_out);
}
+
+ he->init_have_children = true;
}
static bool hist_browser__toggle_fold(struct hist_browser *browser)
@@ -349,17 +390,49 @@ static bool hist_browser__toggle_fold(struct hist_browser *browser)
has_children = callchain_list__toggle_fold(cl);
if (has_children) {
+ int child_rows = 0;
+
hist_entry__init_have_children(he);
browser->b.nr_entries -= he->nr_rows;
- browser->nr_callchain_rows -= he->nr_rows;
- if (he->unfolded)
- he->nr_rows = callchain__count_rows(&he->sorted_chain);
+ if (he->leaf)
+ browser->nr_callchain_rows -= he->nr_rows;
else
+ browser->nr_hierarchy_entries -= he->nr_rows;
+
+ if (symbol_conf.report_hierarchy)
+ child_rows = hierarchy_count_rows(browser, he, true);
+
+ if (he->unfolded) {
+ if (he->leaf)
+ he->nr_rows = callchain__count_rows(&he->sorted_chain);
+ else
+ he->nr_rows = hierarchy_count_rows(browser, he, false);
+
+			/* account for grandchildren */
+ if (symbol_conf.report_hierarchy)
+ browser->b.nr_entries += child_rows - he->nr_rows;
+
+ if (!he->leaf && he->nr_rows == 0) {
+ he->has_no_entry = true;
+ he->nr_rows = 1;
+ }
+ } else {
+ if (symbol_conf.report_hierarchy)
+ browser->b.nr_entries -= child_rows - he->nr_rows;
+
+ if (he->has_no_entry)
+ he->has_no_entry = false;
+
he->nr_rows = 0;
+ }
browser->b.nr_entries += he->nr_rows;
- browser->nr_callchain_rows += he->nr_rows;
+
+ if (he->leaf)
+ browser->nr_callchain_rows += he->nr_rows;
+ else
+ browser->nr_hierarchy_entries += he->nr_rows;
return true;
}
@@ -422,13 +495,38 @@ static int callchain__set_folding(struct rb_root *chain, bool unfold)
return n;
}
-static void hist_entry__set_folding(struct hist_entry *he, bool unfold)
+static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
+ bool unfold __maybe_unused)
+{
+ float percent;
+ struct rb_node *nd;
+ struct hist_entry *child;
+ int n = 0;
+
+ for (nd = rb_first(&he->hroot_out); nd; nd = rb_next(nd)) {
+ child = rb_entry(nd, struct hist_entry, rb_node);
+ percent = hist_entry__get_percent_limit(child);
+ if (!child->filtered && percent >= hb->min_pcnt)
+ n++;
+ }
+
+ return n;
+}
+
+static void hist_entry__set_folding(struct hist_entry *he,
+ struct hist_browser *hb, bool unfold)
{
hist_entry__init_have_children(he);
he->unfolded = unfold ? he->has_children : false;
if (he->has_children) {
- int n = callchain__set_folding(&he->sorted_chain, unfold);
+ int n;
+
+ if (he->leaf)
+ n = callchain__set_folding(&he->sorted_chain, unfold);
+ else
+ n = hierarchy_set_folding(hb, he, unfold);
+
he->nr_rows = unfold ? n : 0;
} else
he->nr_rows = 0;
@@ -438,19 +536,38 @@ static void
__hist_browser__set_folding(struct hist_browser *browser, bool unfold)
{
struct rb_node *nd;
- struct hists *hists = browser->hists;
+ struct hist_entry *he;
+ double percent;
- for (nd = rb_first(&hists->entries);
- (nd = hists__filter_entries(nd, browser->min_pcnt)) != NULL;
- nd = rb_next(nd)) {
- struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
- hist_entry__set_folding(he, unfold);
- browser->nr_callchain_rows += he->nr_rows;
+ nd = rb_first(&browser->hists->entries);
+ while (nd) {
+ he = rb_entry(nd, struct hist_entry, rb_node);
+
+ /* set folding state even if it's currently folded */
+ nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD);
+
+ hist_entry__set_folding(he, browser, unfold);
+
+ percent = hist_entry__get_percent_limit(he);
+ if (he->filtered || percent < browser->min_pcnt)
+ continue;
+
+ if (!he->depth || unfold)
+ browser->nr_hierarchy_entries++;
+ if (he->leaf)
+ browser->nr_callchain_rows += he->nr_rows;
+ else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
+ browser->nr_hierarchy_entries++;
+ he->has_no_entry = true;
+ he->nr_rows = 1;
+ } else
+ he->has_no_entry = false;
}
}
static void hist_browser__set_folding(struct hist_browser *browser, bool unfold)
{
+ browser->nr_hierarchy_entries = 0;
browser->nr_callchain_rows = 0;
__hist_browser__set_folding(browser, unfold);
@@ -657,9 +774,24 @@ static int hist_browser__show_callchain_list(struct hist_browser *browser,
return 1;
}
+static bool check_percent_display(struct rb_node *node, u64 parent_total)
+{
+ struct callchain_node *child;
+
+ if (node == NULL)
+ return false;
+
+ if (rb_next(node))
+ return true;
+
+ child = rb_entry(node, struct callchain_node, rb_node);
+ return callchain_cumul_hits(child) != parent_total;
+}
+
static int hist_browser__show_callchain_flat(struct hist_browser *browser,
struct rb_root *root,
unsigned short row, u64 total,
+ u64 parent_total,
print_callchain_entry_fn print,
struct callchain_print_arg *arg,
check_output_full_fn is_output_full)
@@ -669,7 +801,7 @@ static int hist_browser__show_callchain_flat(struct hist_browser *browser,
bool need_percent;
node = rb_first(root);
- need_percent = node && rb_next(node);
+ need_percent = check_percent_display(node, parent_total);
while (node) {
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
@@ -763,6 +895,7 @@ static char *hist_browser__folded_callchain_str(struct hist_browser *browser,
static int hist_browser__show_callchain_folded(struct hist_browser *browser,
struct rb_root *root,
unsigned short row, u64 total,
+ u64 parent_total,
print_callchain_entry_fn print,
struct callchain_print_arg *arg,
check_output_full_fn is_output_full)
@@ -772,7 +905,7 @@ static int hist_browser__show_callchain_folded(struct hist_browser *browser,
bool need_percent;
node = rb_first(root);
- need_percent = node && rb_next(node);
+ need_percent = check_percent_display(node, parent_total);
while (node) {
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
@@ -844,20 +977,24 @@ next:
return row - first_row;
}
-static int hist_browser__show_callchain(struct hist_browser *browser,
+static int hist_browser__show_callchain_graph(struct hist_browser *browser,
struct rb_root *root, int level,
unsigned short row, u64 total,
+ u64 parent_total,
print_callchain_entry_fn print,
struct callchain_print_arg *arg,
check_output_full_fn is_output_full)
{
struct rb_node *node;
int first_row = row, offset = level * LEVEL_OFFSET_STEP;
- u64 new_total;
bool need_percent;
+ u64 percent_total = total;
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ percent_total = parent_total;
node = rb_first(root);
- need_percent = node && rb_next(node);
+ need_percent = check_percent_display(node, parent_total);
while (node) {
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
@@ -878,7 +1015,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
folded_sign = callchain_list__folded(chain);
row += hist_browser__show_callchain_list(browser, child,
- chain, row, total,
+ chain, row, percent_total,
was_first && need_percent,
offset + extra_offset,
print, arg);
@@ -893,13 +1030,9 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
if (folded_sign == '-') {
const int new_level = level + (extra_offset ? 2 : 1);
- if (callchain_param.mode == CHAIN_GRAPH_REL)
- new_total = child->children_hit;
- else
- new_total = total;
-
- row += hist_browser__show_callchain(browser, &child->rb_root,
- new_level, row, new_total,
+ row += hist_browser__show_callchain_graph(browser, &child->rb_root,
+ new_level, row, total,
+ child->children_hit,
print, arg, is_output_full);
}
if (is_output_full(browser, row))
@@ -910,6 +1043,45 @@ out:
return row - first_row;
}
+static int hist_browser__show_callchain(struct hist_browser *browser,
+ struct hist_entry *entry, int level,
+ unsigned short row,
+ print_callchain_entry_fn print,
+ struct callchain_print_arg *arg,
+ check_output_full_fn is_output_full)
+{
+ u64 total = hists__total_period(entry->hists);
+ u64 parent_total;
+ int printed;
+
+ if (symbol_conf.cumulate_callchain)
+ parent_total = entry->stat_acc->period;
+ else
+ parent_total = entry->stat.period;
+
+ if (callchain_param.mode == CHAIN_FLAT) {
+ printed = hist_browser__show_callchain_flat(browser,
+ &entry->sorted_chain, row,
+ total, parent_total, print, arg,
+ is_output_full);
+ } else if (callchain_param.mode == CHAIN_FOLDED) {
+ printed = hist_browser__show_callchain_folded(browser,
+ &entry->sorted_chain, row,
+ total, parent_total, print, arg,
+ is_output_full);
+ } else {
+ printed = hist_browser__show_callchain_graph(browser,
+ &entry->sorted_chain, level, row,
+ total, parent_total, print, arg,
+ is_output_full);
+ }
+
+ if (arg->is_current_entry)
+ browser->he_selection = entry;
+
+ return printed;
+}
+
struct hpp_arg {
struct ui_browser *b;
char folded_sign;
@@ -1006,7 +1178,6 @@ static int hist_browser__show_entry(struct hist_browser *browser,
struct hist_entry *entry,
unsigned short row)
{
- char s[256];
int printed = 0;
int width = browser->b.width;
char folded_sign = ' ';
@@ -1031,16 +1202,18 @@ static int hist_browser__show_entry(struct hist_browser *browser,
.folded_sign = folded_sign,
.current_entry = current_entry,
};
- struct perf_hpp hpp = {
- .buf = s,
- .size = sizeof(s),
- .ptr = &arg,
- };
int column = 0;
hist_browser__gotorc(browser, row, 0);
- perf_hpp__for_each_format(fmt) {
+ hists__for_each_format(browser->hists, fmt) {
+ char s[2048];
+ struct perf_hpp hpp = {
+ .buf = s,
+ .size = sizeof(s),
+ .ptr = &arg,
+ };
+
if (perf_hpp__should_skip(fmt, entry->hists) ||
column++ < browser->b.horiz_scroll)
continue;
@@ -1065,11 +1238,18 @@ static int hist_browser__show_entry(struct hist_browser *browser,
}
if (fmt->color) {
- width -= fmt->color(fmt, &hpp, entry);
+ int ret = fmt->color(fmt, &hpp, entry);
+ hist_entry__snprintf_alignment(entry, &hpp, fmt, ret);
+ /*
+ * fmt->color() already used ui_browser to
+			 * print the non-alignment bits, skip it (+ret):
+ */
+ ui_browser__printf(&browser->b, "%s", s + ret);
} else {
- width -= fmt->entry(fmt, &hpp, entry);
+ hist_entry__snprintf_alignment(entry, &hpp, fmt, fmt->entry(fmt, &hpp, entry));
ui_browser__printf(&browser->b, "%s", s);
}
+ width -= hpp.buf - s;
}
/* The scroll bar isn't being used */
@@ -1084,43 +1264,246 @@ static int hist_browser__show_entry(struct hist_browser *browser,
--row_offset;
if (folded_sign == '-' && row != browser->b.rows) {
- u64 total = hists__total_period(entry->hists);
struct callchain_print_arg arg = {
.row_offset = row_offset,
.is_current_entry = current_entry,
};
- if (callchain_param.mode == CHAIN_GRAPH_REL) {
- if (symbol_conf.cumulate_callchain)
- total = entry->stat_acc->period;
- else
- total = entry->stat.period;
- }
-
- if (callchain_param.mode == CHAIN_FLAT) {
- printed += hist_browser__show_callchain_flat(browser,
- &entry->sorted_chain, row, total,
- hist_browser__show_callchain_entry, &arg,
- hist_browser__check_output_full);
- } else if (callchain_param.mode == CHAIN_FOLDED) {
- printed += hist_browser__show_callchain_folded(browser,
- &entry->sorted_chain, row, total,
+ printed += hist_browser__show_callchain(browser, entry, 1, row,
hist_browser__show_callchain_entry, &arg,
hist_browser__check_output_full);
+ }
+
+ return printed;
+}
+
+static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
+ struct hist_entry *entry,
+ unsigned short row,
+ int level)
+{
+ int printed = 0;
+ int width = browser->b.width;
+ char folded_sign = ' ';
+ bool current_entry = ui_browser__is_current_entry(&browser->b, row);
+ off_t row_offset = entry->row_offset;
+ bool first = true;
+ struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
+ struct hpp_arg arg = {
+ .b = &browser->b,
+ .current_entry = current_entry,
+ };
+ int column = 0;
+ int hierarchy_indent = (entry->hists->nr_hpp_node - 2) * HIERARCHY_INDENT;
+
+ if (current_entry) {
+ browser->he_selection = entry;
+ browser->selection = &entry->ms;
+ }
+
+ hist_entry__init_have_children(entry);
+ folded_sign = hist_entry__folded(entry);
+ arg.folded_sign = folded_sign;
+
+ if (entry->leaf && row_offset) {
+ row_offset--;
+ goto show_callchain;
+ }
+
+ hist_browser__gotorc(browser, row, 0);
+
+ if (current_entry && browser->b.navkeypressed)
+ ui_browser__set_color(&browser->b, HE_COLORSET_SELECTED);
+ else
+ ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
+
+ ui_browser__write_nstring(&browser->b, "", level * HIERARCHY_INDENT);
+ width -= level * HIERARCHY_INDENT;
+
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&entry->hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ char s[2048];
+ struct perf_hpp hpp = {
+ .buf = s,
+ .size = sizeof(s),
+ .ptr = &arg,
+ };
+
+ if (perf_hpp__should_skip(fmt, entry->hists) ||
+ column++ < browser->b.horiz_scroll)
+ continue;
+
+ if (current_entry && browser->b.navkeypressed) {
+ ui_browser__set_color(&browser->b,
+ HE_COLORSET_SELECTED);
} else {
- printed += hist_browser__show_callchain(browser,
- &entry->sorted_chain, 1, row, total,
- hist_browser__show_callchain_entry, &arg,
- hist_browser__check_output_full);
+ ui_browser__set_color(&browser->b,
+ HE_COLORSET_NORMAL);
+ }
+
+ if (first) {
+ ui_browser__printf(&browser->b, "%c", folded_sign);
+ width--;
+ first = false;
+ } else {
+ ui_browser__printf(&browser->b, " ");
+ width -= 2;
+ }
+
+ if (fmt->color) {
+ int ret = fmt->color(fmt, &hpp, entry);
+ hist_entry__snprintf_alignment(entry, &hpp, fmt, ret);
+ /*
+ * fmt->color() already used ui_browser to
+			 * print the non-alignment bits, skip it (+ret):
+ */
+ ui_browser__printf(&browser->b, "%s", s + ret);
+ } else {
+ int ret = fmt->entry(fmt, &hpp, entry);
+ hist_entry__snprintf_alignment(entry, &hpp, fmt, ret);
+ ui_browser__printf(&browser->b, "%s", s);
+ }
+ width -= hpp.buf - s;
+ }
+
+ ui_browser__write_nstring(&browser->b, "", hierarchy_indent);
+ width -= hierarchy_indent;
+
+ if (column >= browser->b.horiz_scroll) {
+ char s[2048];
+ struct perf_hpp hpp = {
+ .buf = s,
+ .size = sizeof(s),
+ .ptr = &arg,
+ };
+
+ if (current_entry && browser->b.navkeypressed) {
+ ui_browser__set_color(&browser->b,
+ HE_COLORSET_SELECTED);
+ } else {
+ ui_browser__set_color(&browser->b,
+ HE_COLORSET_NORMAL);
}
- if (arg.is_current_entry)
- browser->he_selection = entry;
+ perf_hpp_list__for_each_format(entry->hpp_list, fmt) {
+ ui_browser__write_nstring(&browser->b, "", 2);
+ width -= 2;
+
+ /*
+ * No need to call hist_entry__snprintf_alignment()
+ * since this fmt is always the last column in the
+ * hierarchy mode.
+ */
+ if (fmt->color) {
+ width -= fmt->color(fmt, &hpp, entry);
+ } else {
+ int i = 0;
+
+ width -= fmt->entry(fmt, &hpp, entry);
+ ui_browser__printf(&browser->b, "%s", ltrim(s));
+
+ while (isspace(s[i++]))
+ width++;
+ }
+ }
+ }
+
+ /* The scroll bar isn't being used */
+ if (!browser->b.navkeypressed)
+ width += 1;
+
+ ui_browser__write_nstring(&browser->b, "", width);
+
+ ++row;
+ ++printed;
+
+show_callchain:
+ if (entry->leaf && folded_sign == '-' && row != browser->b.rows) {
+ struct callchain_print_arg carg = {
+ .row_offset = row_offset,
+ };
+
+ printed += hist_browser__show_callchain(browser, entry,
+ level + 1, row,
+ hist_browser__show_callchain_entry, &carg,
+ hist_browser__check_output_full);
}
return printed;
}
+static int hist_browser__show_no_entry(struct hist_browser *browser,
+ unsigned short row, int level)
+{
+ int width = browser->b.width;
+ bool current_entry = ui_browser__is_current_entry(&browser->b, row);
+ bool first = true;
+ int column = 0;
+ int ret;
+ struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
+ int indent = browser->hists->nr_hpp_node - 2;
+
+ if (current_entry) {
+ browser->he_selection = NULL;
+ browser->selection = NULL;
+ }
+
+ hist_browser__gotorc(browser, row, 0);
+
+ if (current_entry && browser->b.navkeypressed)
+ ui_browser__set_color(&browser->b, HE_COLORSET_SELECTED);
+ else
+ ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
+
+ ui_browser__write_nstring(&browser->b, "", level * HIERARCHY_INDENT);
+ width -= level * HIERARCHY_INDENT;
+
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&browser->hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ if (perf_hpp__should_skip(fmt, browser->hists) ||
+ column++ < browser->b.horiz_scroll)
+ continue;
+
+ ret = fmt->width(fmt, NULL, hists_to_evsel(browser->hists));
+
+ if (first) {
+ /* for folded sign */
+ first = false;
+ ret++;
+ } else {
+ /* space between columns */
+ ret += 2;
+ }
+
+ ui_browser__write_nstring(&browser->b, "", ret);
+ width -= ret;
+ }
+
+ ui_browser__write_nstring(&browser->b, "", indent * HIERARCHY_INDENT);
+ width -= indent * HIERARCHY_INDENT;
+
+ if (column >= browser->b.horiz_scroll) {
+ char buf[32];
+
+ ret = snprintf(buf, sizeof(buf), "no entry >= %.2f%%", browser->min_pcnt);
+ ui_browser__printf(&browser->b, " %s", buf);
+ width -= ret + 2;
+ }
+
+ /* The scroll bar isn't being used */
+ if (!browser->b.navkeypressed)
+ width += 1;
+
+ ui_browser__write_nstring(&browser->b, "", width);
+ return 1;
+}
+
static int advance_hpp_check(struct perf_hpp *hpp, int inc)
{
advance_hpp(hpp, inc);
@@ -1144,7 +1527,7 @@ static int hists_browser__scnprintf_headers(struct hist_browser *browser, char *
return ret;
}
- perf_hpp__for_each_format(fmt) {
+ hists__for_each_format(browser->hists, fmt) {
if (perf_hpp__should_skip(fmt, hists) || column++ < browser->b.horiz_scroll)
continue;
@@ -1160,11 +1543,96 @@ static int hists_browser__scnprintf_headers(struct hist_browser *browser, char *
return ret;
}
+static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *browser, char *buf, size_t size)
+{
+ struct hists *hists = browser->hists;
+ struct perf_hpp dummy_hpp = {
+ .buf = buf,
+ .size = size,
+ };
+ struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
+ size_t ret = 0;
+ int column = 0;
+ int indent = hists->nr_hpp_node - 2;
+ bool first_node, first_col;
+
+ ret = scnprintf(buf, size, " ");
+ if (advance_hpp_check(&dummy_hpp, ret))
+ return ret;
+
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ if (column++ < browser->b.horiz_scroll)
+ continue;
+
+ ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
+ if (advance_hpp_check(&dummy_hpp, ret))
+ break;
+
+ ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " ");
+ if (advance_hpp_check(&dummy_hpp, ret))
+ break;
+ }
+
+ ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s",
+ indent * HIERARCHY_INDENT, "");
+ if (advance_hpp_check(&dummy_hpp, ret))
+ return ret;
+
+ first_node = true;
+ list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
+ if (!first_node) {
+ ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " / ");
+ if (advance_hpp_check(&dummy_hpp, ret))
+ break;
+ }
+ first_node = false;
+
+ first_col = true;
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ char *start;
+
+ if (perf_hpp__should_skip(fmt, hists))
+ continue;
+
+ if (!first_col) {
+ ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "+");
+ if (advance_hpp_check(&dummy_hpp, ret))
+ break;
+ }
+ first_col = false;
+
+ ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
+ dummy_hpp.buf[ret] = '\0';
+ rtrim(dummy_hpp.buf);
+
+ start = ltrim(dummy_hpp.buf);
+ ret = strlen(start);
+
+ if (start != dummy_hpp.buf)
+ memmove(dummy_hpp.buf, start, ret + 1);
+
+ if (advance_hpp_check(&dummy_hpp, ret))
+ break;
+ }
+ }
+
+ return ret;
+}
+
static void hist_browser__show_headers(struct hist_browser *browser)
{
char headers[1024];
- hists_browser__scnprintf_headers(browser, headers, sizeof(headers));
+ if (symbol_conf.report_hierarchy)
+ hists_browser__scnprintf_hierarchy_headers(browser, headers,
+ sizeof(headers));
+ else
+ hists_browser__scnprintf_headers(browser, headers,
+ sizeof(headers));
ui_browser__gotorc(&browser->b, 0, 0);
ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
@@ -1196,18 +1664,34 @@ static unsigned int hist_browser__refresh(struct ui_browser *browser)
hb->he_selection = NULL;
hb->selection = NULL;
- for (nd = browser->top; nd; nd = rb_next(nd)) {
+ for (nd = browser->top; nd; nd = rb_hierarchy_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
float percent;
- if (h->filtered)
+ if (h->filtered) {
+ /* let it move to sibling */
+ h->unfolded = false;
continue;
+ }
percent = hist_entry__get_percent_limit(h);
if (percent < hb->min_pcnt)
continue;
- row += hist_browser__show_entry(hb, h, row);
+ if (symbol_conf.report_hierarchy) {
+ row += hist_browser__show_hierarchy_entry(hb, h, row,
+ h->depth);
+ if (row == browser->rows)
+ break;
+
+ if (h->has_no_entry) {
+ hist_browser__show_no_entry(hb, row, h->depth + 1);
+ row++;
+ }
+ } else {
+ row += hist_browser__show_entry(hb, h, row);
+ }
+
if (row == browser->rows)
break;
}
@@ -1225,7 +1709,14 @@ static struct rb_node *hists__filter_entries(struct rb_node *nd,
if (!h->filtered && percent >= min_pcnt)
return nd;
- nd = rb_next(nd);
+ /*
+	 * If it's filtered, all of its children were filtered too.
+	 * So move to the sibling node.
+ */
+ if (rb_next(nd))
+ nd = rb_next(nd);
+ else
+ nd = rb_hierarchy_next(nd);
}
return NULL;
@@ -1241,7 +1732,7 @@ static struct rb_node *hists__filter_prev_entries(struct rb_node *nd,
if (!h->filtered && percent >= min_pcnt)
return nd;
- nd = rb_prev(nd);
+ nd = rb_hierarchy_prev(nd);
}
return NULL;
@@ -1271,8 +1762,8 @@ static void ui_browser__hists_seek(struct ui_browser *browser,
nd = browser->top;
goto do_offset;
case SEEK_END:
- nd = hists__filter_prev_entries(rb_last(browser->entries),
- hb->min_pcnt);
+ nd = rb_hierarchy_last(rb_last(browser->entries));
+ nd = hists__filter_prev_entries(nd, hb->min_pcnt);
first = false;
break;
default:
@@ -1306,7 +1797,7 @@ do_offset:
if (offset > 0) {
do {
h = rb_entry(nd, struct hist_entry, rb_node);
- if (h->unfolded) {
+ if (h->unfolded && h->leaf) {
u16 remaining = h->nr_rows - h->row_offset;
if (offset > remaining) {
offset -= remaining;
@@ -1318,7 +1809,8 @@ do_offset:
break;
}
}
- nd = hists__filter_entries(rb_next(nd), hb->min_pcnt);
+ nd = hists__filter_entries(rb_hierarchy_next(nd),
+ hb->min_pcnt);
if (nd == NULL)
break;
--offset;
@@ -1327,7 +1819,7 @@ do_offset:
} else if (offset < 0) {
while (1) {
h = rb_entry(nd, struct hist_entry, rb_node);
- if (h->unfolded) {
+ if (h->unfolded && h->leaf) {
if (first) {
if (-offset > h->row_offset) {
offset += h->row_offset;
@@ -1351,7 +1843,7 @@ do_offset:
}
}
- nd = hists__filter_prev_entries(rb_prev(nd),
+ nd = hists__filter_prev_entries(rb_hierarchy_prev(nd),
hb->min_pcnt);
if (nd == NULL)
break;
@@ -1364,7 +1856,7 @@ do_offset:
* row_offset at its last entry.
*/
h = rb_entry(nd, struct hist_entry, rb_node);
- if (h->unfolded)
+ if (h->unfolded && h->leaf)
h->row_offset = h->nr_rows;
break;
}
@@ -1378,17 +1870,14 @@ do_offset:
}
static int hist_browser__fprintf_callchain(struct hist_browser *browser,
- struct hist_entry *he, FILE *fp)
+ struct hist_entry *he, FILE *fp,
+ int level)
{
- u64 total = hists__total_period(he->hists);
struct callchain_print_arg arg = {
.fp = fp,
};
- if (symbol_conf.cumulate_callchain)
- total = he->stat_acc->period;
-
- hist_browser__show_callchain(browser, &he->sorted_chain, 1, 0, total,
+ hist_browser__show_callchain(browser, he, level, 0,
hist_browser__fprintf_callchain_entry, &arg,
hist_browser__check_dump_full);
return arg.printed;
@@ -1414,7 +1903,7 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser,
if (symbol_conf.use_callchain)
printed += fprintf(fp, "%c ", folded_sign);
- perf_hpp__for_each_format(fmt) {
+ hists__for_each_format(browser->hists, fmt) {
if (perf_hpp__should_skip(fmt, he->hists))
continue;
@@ -1425,12 +1914,71 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser,
first = false;
ret = fmt->entry(fmt, &hpp, he);
+ ret = hist_entry__snprintf_alignment(he, &hpp, fmt, ret);
advance_hpp(&hpp, ret);
}
- printed += fprintf(fp, "%s\n", rtrim(s));
+ printed += fprintf(fp, "%s\n", s);
if (folded_sign == '-')
- printed += hist_browser__fprintf_callchain(browser, he, fp);
+ printed += hist_browser__fprintf_callchain(browser, he, fp, 1);
+
+ return printed;
+}
+
+static int hist_browser__fprintf_hierarchy_entry(struct hist_browser *browser,
+ struct hist_entry *he,
+ FILE *fp, int level)
+{
+ char s[8192];
+ int printed = 0;
+ char folded_sign = ' ';
+ struct perf_hpp hpp = {
+ .buf = s,
+ .size = sizeof(s),
+ };
+ struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
+ bool first = true;
+ int ret;
+ int hierarchy_indent = (he->hists->nr_hpp_node - 2) * HIERARCHY_INDENT;
+
+ printed = fprintf(fp, "%*s", level * HIERARCHY_INDENT, "");
+
+ folded_sign = hist_entry__folded(he);
+ printed += fprintf(fp, "%c", folded_sign);
+
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&he->hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ if (!first) {
+ ret = scnprintf(hpp.buf, hpp.size, " ");
+ advance_hpp(&hpp, ret);
+ } else
+ first = false;
+
+ ret = fmt->entry(fmt, &hpp, he);
+ advance_hpp(&hpp, ret);
+ }
+
+ ret = scnprintf(hpp.buf, hpp.size, "%*s", hierarchy_indent, "");
+ advance_hpp(&hpp, ret);
+
+ perf_hpp_list__for_each_format(he->hpp_list, fmt) {
+ ret = scnprintf(hpp.buf, hpp.size, " ");
+ advance_hpp(&hpp, ret);
+
+ ret = fmt->entry(fmt, &hpp, he);
+ advance_hpp(&hpp, ret);
+ }
+
+ printed += fprintf(fp, "%s\n", rtrim(s));
+
+ if (he->leaf && folded_sign == '-') {
+ printed += hist_browser__fprintf_callchain(browser, he, fp,
+ he->depth + 1);
+ }
return printed;
}
@@ -1444,8 +1992,16 @@ static int hist_browser__fprintf(struct hist_browser *browser, FILE *fp)
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
- printed += hist_browser__fprintf_entry(browser, h, fp);
- nd = hists__filter_entries(rb_next(nd), browser->min_pcnt);
+ if (symbol_conf.report_hierarchy) {
+ printed += hist_browser__fprintf_hierarchy_entry(browser,
+ h, fp,
+ h->depth);
+ } else {
+ printed += hist_browser__fprintf_entry(browser, h, fp);
+ }
+
+ nd = hists__filter_entries(rb_hierarchy_next(nd),
+ browser->min_pcnt);
}
return printed;
@@ -1580,11 +2136,18 @@ static int hists__browser_title(struct hists *hists,
if (hists->uid_filter_str)
printed += snprintf(bf + printed, size - printed,
", UID: %s", hists->uid_filter_str);
- if (thread)
- printed += scnprintf(bf + printed, size - printed,
+ if (thread) {
+ if (sort__has_thread) {
+ printed += scnprintf(bf + printed, size - printed,
", Thread: %s(%d)",
(thread->comm_set ? thread__comm_str(thread) : ""),
thread->tid);
+ } else {
+ printed += scnprintf(bf + printed, size - printed,
+ ", Thread: %s",
+ (thread->comm_set ? thread__comm_str(thread) : ""));
+ }
+ }
if (dso)
printed += scnprintf(bf + printed, size - printed,
", DSO: %s", dso->short_name);
@@ -1759,15 +2322,24 @@ do_zoom_thread(struct hist_browser *browser, struct popup_action *act)
{
struct thread *thread = act->thread;
+ if ((!sort__has_thread && !sort__has_comm) || thread == NULL)
+ return 0;
+
if (browser->hists->thread_filter) {
pstack__remove(browser->pstack, &browser->hists->thread_filter);
perf_hpp__set_elide(HISTC_THREAD, false);
thread__zput(browser->hists->thread_filter);
ui_helpline__pop();
} else {
- ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s(%d) thread\"",
- thread->comm_set ? thread__comm_str(thread) : "",
- thread->tid);
+ if (sort__has_thread) {
+ ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s(%d) thread\"",
+ thread->comm_set ? thread__comm_str(thread) : "",
+ thread->tid);
+ } else {
+ ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s thread\"",
+ thread->comm_set ? thread__comm_str(thread) : "");
+ }
+
browser->hists->thread_filter = thread__get(thread);
perf_hpp__set_elide(HISTC_THREAD, false);
pstack__push(browser->pstack, &browser->hists->thread_filter);
@@ -1782,13 +2354,22 @@ static int
add_thread_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, struct thread *thread)
{
- if (thread == NULL)
+ int ret;
+
+ if ((!sort__has_thread && !sort__has_comm) || thread == NULL)
return 0;
- if (asprintf(optstr, "Zoom %s %s(%d) thread",
- browser->hists->thread_filter ? "out of" : "into",
- thread->comm_set ? thread__comm_str(thread) : "",
- thread->tid) < 0)
+ if (sort__has_thread) {
+ ret = asprintf(optstr, "Zoom %s %s(%d) thread",
+ browser->hists->thread_filter ? "out of" : "into",
+ thread->comm_set ? thread__comm_str(thread) : "",
+ thread->tid);
+ } else {
+ ret = asprintf(optstr, "Zoom %s %s thread",
+ browser->hists->thread_filter ? "out of" : "into",
+ thread->comm_set ? thread__comm_str(thread) : "");
+ }
+ if (ret < 0)
return 0;
act->thread = thread;
@@ -1801,6 +2382,9 @@ do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
{
struct map *map = act->ms.map;
+ if (!sort__has_dso || map == NULL)
+ return 0;
+
if (browser->hists->dso_filter) {
pstack__remove(browser->pstack, &browser->hists->dso_filter);
perf_hpp__set_elide(HISTC_DSO, false);
@@ -1825,7 +2409,7 @@ static int
add_dso_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, struct map *map)
{
- if (map == NULL)
+ if (!sort__has_dso || map == NULL)
return 0;
if (asprintf(optstr, "Zoom %s %s DSO",
@@ -1850,7 +2434,7 @@ static int
add_map_opt(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr, struct map *map)
{
- if (map == NULL)
+ if (!sort__has_dso || map == NULL)
return 0;
if (asprintf(optstr, "Browse map details") < 0)
@@ -1952,6 +2536,9 @@ add_exit_opt(struct hist_browser *browser __maybe_unused,
static int
do_zoom_socket(struct hist_browser *browser, struct popup_action *act)
{
+ if (!sort__has_socket || act->socket < 0)
+ return 0;
+
if (browser->hists->socket_filter > -1) {
pstack__remove(browser->pstack, &browser->hists->socket_filter);
browser->hists->socket_filter = -1;
@@ -1971,7 +2558,7 @@ static int
add_socket_opt(struct hist_browser *browser, struct popup_action *act,
char **optstr, int socket_id)
{
- if (socket_id < 0)
+ if (!sort__has_socket || socket_id < 0)
return 0;
if (asprintf(optstr, "Zoom %s Processor Socket %d",
@@ -1989,17 +2576,60 @@ static void hist_browser__update_nr_entries(struct hist_browser *hb)
u64 nr_entries = 0;
struct rb_node *nd = rb_first(&hb->hists->entries);
- if (hb->min_pcnt == 0) {
+ if (hb->min_pcnt == 0 && !symbol_conf.report_hierarchy) {
hb->nr_non_filtered_entries = hb->hists->nr_non_filtered_entries;
return;
}
while ((nd = hists__filter_entries(nd, hb->min_pcnt)) != NULL) {
nr_entries++;
- nd = rb_next(nd);
+ nd = rb_hierarchy_next(nd);
}
hb->nr_non_filtered_entries = nr_entries;
+ hb->nr_hierarchy_entries = nr_entries;
+}
+
+static void hist_browser__update_percent_limit(struct hist_browser *hb,
+ double percent)
+{
+ struct hist_entry *he;
+ struct rb_node *nd = rb_first(&hb->hists->entries);
+ u64 total = hists__total_period(hb->hists);
+ u64 min_callchain_hits = total * (percent / 100);
+
+ hb->min_pcnt = callchain_param.min_percent = percent;
+
+ while ((nd = hists__filter_entries(nd, hb->min_pcnt)) != NULL) {
+ he = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (he->has_no_entry) {
+ he->has_no_entry = false;
+ he->nr_rows = 0;
+ }
+
+ if (!he->leaf || !symbol_conf.use_callchain)
+ goto next;
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL) {
+ total = he->stat.period;
+
+ if (symbol_conf.cumulate_callchain)
+ total = he->stat_acc->period;
+
+ min_callchain_hits = total * (percent / 100);
+ }
+
+ callchain_param.sort(&he->sorted_chain, he->callchain,
+ min_callchain_hits, &callchain_param);
+
+next:
+ nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD);
+
+		/* force re-evaluation of the callchain folding state */
+ he->init_have_children = false;
+ hist_entry__set_folding(he, hb, false);
+ }
}
static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
@@ -2037,6 +2667,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"E Expand all callchains\n" \
"F Toggle percentage of filtered entries\n" \
"H Display column headers\n" \
+ "L Change percent limit\n" \
"m Display context menu\n" \
"S Zoom into current Processor Socket\n" \
@@ -2077,7 +2708,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
memset(options, 0, sizeof(options));
memset(actions, 0, sizeof(actions));
- perf_hpp__for_each_format(fmt) {
+ hists__for_each_format(browser->hists, fmt) {
perf_hpp__reset_width(fmt, hists);
/*
* This is done just once, and activates the horizontal scrolling
@@ -2192,6 +2823,24 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
top->zero = !top->zero;
}
continue;
+ case 'L':
+ if (ui_browser__input_window("Percent Limit",
+ "Please enter the value you want to hide entries under that percent.",
+ buf, "ENTER: OK, ESC: Cancel",
+ delay_secs * 2) == K_ENTER) {
+ char *end;
+ double new_percent = strtod(buf, &end);
+
+ if (new_percent < 0 || new_percent > 100) {
+ ui_browser__warning(&browser->b, delay_secs * 2,
+ "Invalid percent: %.2f", new_percent);
+ continue;
+ }
+
+ hist_browser__update_percent_limit(browser, new_percent);
+ hist_browser__reset(browser);
+ }
+ continue;
case K_F1:
case 'h':
case '?':
@@ -2263,10 +2912,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
continue;
}
- if (!sort__has_sym)
- goto add_exit_option;
-
- if (browser->selection == NULL)
+ if (!sort__has_sym || browser->selection == NULL)
goto skip_annotation;
if (sort__mode == SORT_MODE__BRANCH) {
@@ -2306,11 +2952,16 @@ skip_annotation:
&options[nr_options],
socked_id);
/* perf script support */
+ if (!is_report_browser(hbt))
+ goto skip_scripting;
+
if (browser->he_selection) {
- nr_options += add_script_opt(browser,
- &actions[nr_options],
- &options[nr_options],
- thread, NULL);
+ if (sort__has_thread && thread) {
+ nr_options += add_script_opt(browser,
+ &actions[nr_options],
+ &options[nr_options],
+ thread, NULL);
+ }
/*
* Note that browser->selection != NULL
* when browser->he_selection is not NULL,
@@ -2320,16 +2971,18 @@ skip_annotation:
*
* See hist_browser__show_entry.
*/
- nr_options += add_script_opt(browser,
- &actions[nr_options],
- &options[nr_options],
- NULL, browser->selection->sym);
+ if (sort__has_sym && browser->selection->sym) {
+ nr_options += add_script_opt(browser,
+ &actions[nr_options],
+ &options[nr_options],
+ NULL, browser->selection->sym);
+ }
}
nr_options += add_script_opt(browser, &actions[nr_options],
&options[nr_options], NULL, NULL);
nr_options += add_switch_opt(browser, &actions[nr_options],
&options[nr_options]);
-add_exit_option:
+skip_scripting:
nr_options += add_exit_opt(browser, &actions[nr_options],
&options[nr_options]);
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 0f8dcfdfb10f..bd9bf7e343b1 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -306,7 +306,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
nr_cols = 0;
- perf_hpp__for_each_format(fmt)
+ hists__for_each_format(hists, fmt)
col_types[nr_cols++] = G_TYPE_STRING;
store = gtk_tree_store_newv(nr_cols, col_types);
@@ -317,7 +317,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
col_idx = 0;
- perf_hpp__for_each_format(fmt) {
+ hists__for_each_format(hists, fmt) {
if (perf_hpp__should_skip(fmt, hists))
continue;
@@ -367,7 +367,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
col_idx = 0;
- perf_hpp__for_each_format(fmt) {
+ hists__for_each_format(hists, fmt) {
if (perf_hpp__should_skip(fmt, h->hists))
continue;
@@ -396,6 +396,194 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
gtk_container_add(GTK_CONTAINER(window), view);
}
+static void perf_gtk__add_hierarchy_entries(struct hists *hists,
+ struct rb_root *root,
+ GtkTreeStore *store,
+ GtkTreeIter *parent,
+ struct perf_hpp *hpp,
+ float min_pcnt)
+{
+ int col_idx = 0;
+ struct rb_node *node;
+ struct hist_entry *he;
+ struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
+ u64 total = hists__total_period(hists);
+ int size;
+
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ GtkTreeIter iter;
+ float percent;
+ char *bf;
+
+ he = rb_entry(node, struct hist_entry, rb_node);
+ if (he->filtered)
+ continue;
+
+ percent = hist_entry__get_percent_limit(he);
+ if (percent < min_pcnt)
+ continue;
+
+ gtk_tree_store_append(store, &iter, parent);
+
+ col_idx = 0;
+
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ if (fmt->color)
+ fmt->color(fmt, hpp, he);
+ else
+ fmt->entry(fmt, hpp, he);
+
+ gtk_tree_store_set(store, &iter, col_idx++, hpp->buf, -1);
+ }
+
+ bf = hpp->buf;
+ size = hpp->size;
+ perf_hpp_list__for_each_format(he->hpp_list, fmt) {
+ int ret;
+
+ if (fmt->color)
+ ret = fmt->color(fmt, hpp, he);
+ else
+ ret = fmt->entry(fmt, hpp, he);
+
+ snprintf(hpp->buf + ret, hpp->size - ret, " ");
+ advance_hpp(hpp, ret + 2);
+ }
+
+ gtk_tree_store_set(store, &iter, col_idx, ltrim(rtrim(bf)), -1);
+
+ if (!he->leaf) {
+ hpp->buf = bf;
+ hpp->size = size;
+
+ perf_gtk__add_hierarchy_entries(hists, &he->hroot_out,
+ store, &iter, hpp,
+ min_pcnt);
+
+ if (!hist_entry__has_hierarchy_children(he, min_pcnt)) {
+ char buf[32];
+ GtkTreeIter child;
+
+ snprintf(buf, sizeof(buf), "no entry >= %.2f%%",
+ min_pcnt);
+
+ gtk_tree_store_append(store, &child, &iter);
+ gtk_tree_store_set(store, &child, col_idx, buf, -1);
+ }
+ }
+
+ if (symbol_conf.use_callchain && he->leaf) {
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ total = symbol_conf.cumulate_callchain ?
+ he->stat_acc->period : he->stat.period;
+
+ perf_gtk__add_callchain(&he->sorted_chain, store, &iter,
+ col_idx, total);
+ }
+ }
+
+}
+
+static void perf_gtk__show_hierarchy(GtkWidget *window, struct hists *hists,
+ float min_pcnt)
+{
+ struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
+ GType col_types[MAX_COLUMNS];
+ GtkCellRenderer *renderer;
+ GtkTreeStore *store;
+ GtkWidget *view;
+ int col_idx;
+ int nr_cols = 0;
+ char s[512];
+ char buf[512];
+ bool first_node, first_col;
+ struct perf_hpp hpp = {
+ .buf = s,
+ .size = sizeof(s),
+ };
+
+ hists__for_each_format(hists, fmt) {
+ if (perf_hpp__is_sort_entry(fmt) ||
+ perf_hpp__is_dynamic_entry(fmt))
+ break;
+
+ col_types[nr_cols++] = G_TYPE_STRING;
+ }
+ col_types[nr_cols++] = G_TYPE_STRING;
+
+ store = gtk_tree_store_newv(nr_cols, col_types);
+ view = gtk_tree_view_new();
+ renderer = gtk_cell_renderer_text_new();
+
+ col_idx = 0;
+
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+ -1, fmt->name,
+ renderer, "markup",
+ col_idx++, NULL);
+ }
+
+	/* construct merged column header since sort keys share a single column */
+ buf[0] = '\0';
+ first_node = true;
+ list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
+ if (!first_node)
+ strcat(buf, " / ");
+ first_node = false;
+
+ first_col = true;
+ perf_hpp_list__for_each_format(&fmt_node->hpp ,fmt) {
+ if (perf_hpp__should_skip(fmt, hists))
+ continue;
+
+ if (!first_col)
+ strcat(buf, "+");
+ first_col = false;
+
+ fmt->header(fmt, &hpp, hists_to_evsel(hists));
+ strcat(buf, ltrim(rtrim(hpp.buf)));
+ }
+ }
+
+ gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+ -1, buf,
+ renderer, "markup",
+ col_idx++, NULL);
+
+ for (col_idx = 0; col_idx < nr_cols; col_idx++) {
+ GtkTreeViewColumn *column;
+
+ column = gtk_tree_view_get_column(GTK_TREE_VIEW(view), col_idx);
+ gtk_tree_view_column_set_resizable(column, TRUE);
+
+ if (col_idx == 0) {
+ gtk_tree_view_set_expander_column(GTK_TREE_VIEW(view),
+ column);
+ }
+ }
+
+ gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
+ g_object_unref(GTK_TREE_MODEL(store));
+
+ perf_gtk__add_hierarchy_entries(hists, &hists->entries, store,
+ NULL, &hpp, min_pcnt);
+
+ gtk_tree_view_set_rules_hint(GTK_TREE_VIEW(view), TRUE);
+
+ g_signal_connect(view, "row-activated",
+ G_CALLBACK(on_row_activated), NULL);
+ gtk_container_add(GTK_CONTAINER(window), view);
+}
+
int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
const char *help,
struct hist_browser_timer *hbt __maybe_unused,
@@ -463,7 +651,10 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
GTK_POLICY_AUTOMATIC,
GTK_POLICY_AUTOMATIC);
- perf_gtk__show_hists(scrolled_window, hists, min_pcnt);
+ if (symbol_conf.report_hierarchy)
+ perf_gtk__show_hierarchy(scrolled_window, hists, min_pcnt);
+ else
+ perf_gtk__show_hists(scrolled_window, hists, min_pcnt);
tab_label = gtk_label_new(evname);
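
The merged header string built in perf_gtk__show_hierarchy() above joins hierarchy levels with " / " and the columns inside one level with "+". A minimal standalone sketch of that construction, using hypothetical sort keys:

```c
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the merged-header construction: levels are joined with
 * " / ", columns within one level with "+", e.g. "comm / dso+symbol".
 * The key names are hypothetical, not taken from perf itself. */
int main(void)
{
	const char *levels[][2] = {
		{ "comm", NULL },
		{ "dso", "symbol" },
	};
	char buf[128] = "";

	for (size_t i = 0; i < sizeof(levels) / sizeof(levels[0]); i++) {
		if (i > 0)
			strcat(buf, " / ");	/* separator between levels */
		for (size_t j = 0; j < 2 && levels[i][j]; j++) {
			if (j > 0)
				strcat(buf, "+");	/* separator within a level */
			strcat(buf, levels[i][j]);
		}
	}
	printf("%s\n", buf);	/* prints: comm / dso+symbol */
	return 0;
}
```
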
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index bf2a66e254ea..3baeaa6e71b5 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -5,6 +5,7 @@
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"
+#include "../util/evlist.h"
/* hist period print (hpp) functions */
@@ -371,7 +372,20 @@ static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
return 0;
}
-#define HPP__COLOR_PRINT_FNS(_name, _fn) \
+static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
+{
+ return a->header == hpp__header_fn;
+}
+
+static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
+{
+ if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
+ return false;
+
+ return a->idx == b->idx;
+}
+
+#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx) \
{ \
.name = _name, \
.header = hpp__header_fn, \
@@ -381,9 +395,11 @@ static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
.cmp = hpp__nop_cmp, \
.collapse = hpp__nop_cmp, \
.sort = hpp__sort_ ## _fn, \
+ .idx = PERF_HPP__ ## _idx, \
+ .equal = hpp__equal, \
}
-#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn) \
+#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx) \
{ \
.name = _name, \
.header = hpp__header_fn, \
@@ -393,9 +409,11 @@ static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
.cmp = hpp__nop_cmp, \
.collapse = hpp__nop_cmp, \
.sort = hpp__sort_ ## _fn, \
+ .idx = PERF_HPP__ ## _idx, \
+ .equal = hpp__equal, \
}
-#define HPP__PRINT_FNS(_name, _fn) \
+#define HPP__PRINT_FNS(_name, _fn, _idx) \
{ \
.name = _name, \
.header = hpp__header_fn, \
@@ -404,22 +422,25 @@ static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
.cmp = hpp__nop_cmp, \
.collapse = hpp__nop_cmp, \
.sort = hpp__sort_ ## _fn, \
+ .idx = PERF_HPP__ ## _idx, \
+ .equal = hpp__equal, \
}
struct perf_hpp_fmt perf_hpp__format[] = {
- HPP__COLOR_PRINT_FNS("Overhead", overhead),
- HPP__COLOR_PRINT_FNS("sys", overhead_sys),
- HPP__COLOR_PRINT_FNS("usr", overhead_us),
- HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys),
- HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us),
- HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc),
- HPP__PRINT_FNS("Samples", samples),
- HPP__PRINT_FNS("Period", period)
+ HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
+ HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
+ HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
+ HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
+ HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
+ HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
+ HPP__PRINT_FNS("Samples", samples, SAMPLES),
+ HPP__PRINT_FNS("Period", period, PERIOD)
};
-LIST_HEAD(perf_hpp__list);
-LIST_HEAD(perf_hpp__sort_list);
-
+struct perf_hpp_list perf_hpp_list = {
+ .fields = LIST_HEAD_INIT(perf_hpp_list.fields),
+ .sorts = LIST_HEAD_INIT(perf_hpp_list.sorts),
+};
#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
@@ -485,63 +506,60 @@ void perf_hpp__init(void)
hpp_dimension__add_output(PERF_HPP__PERIOD);
}
-void perf_hpp__column_register(struct perf_hpp_fmt *format)
+void perf_hpp_list__column_register(struct perf_hpp_list *list,
+ struct perf_hpp_fmt *format)
{
- list_add_tail(&format->list, &perf_hpp__list);
+ list_add_tail(&format->list, &list->fields);
}
-void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
+void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
+ struct perf_hpp_fmt *format)
{
- list_del(&format->list);
+ list_add_tail(&format->sort_list, &list->sorts);
}
-void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
-{
- list_add_tail(&format->sort_list, &perf_hpp__sort_list);
-}
-
-void perf_hpp__column_enable(unsigned col)
-{
- BUG_ON(col >= PERF_HPP__MAX_INDEX);
- perf_hpp__column_register(&perf_hpp__format[col]);
-}
-
-void perf_hpp__column_disable(unsigned col)
+void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
- BUG_ON(col >= PERF_HPP__MAX_INDEX);
- perf_hpp__column_unregister(&perf_hpp__format[col]);
+ list_del(&format->list);
}
void perf_hpp__cancel_cumulate(void)
{
+ struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;
+
if (is_strict_order(field_order))
return;
- perf_hpp__column_disable(PERF_HPP__OVERHEAD_ACC);
- perf_hpp__format[PERF_HPP__OVERHEAD].name = "Overhead";
+ ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
+ acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];
+
+ perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
+ if (acc->equal(acc, fmt)) {
+ perf_hpp__column_unregister(fmt);
+ continue;
+ }
+
+ if (ovh->equal(ovh, fmt))
+ fmt->name = "Overhead";
+ }
}
-void perf_hpp__setup_output_field(void)
+static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
+{
+ return a->equal && a->equal(a, b);
+}
+
+void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
struct perf_hpp_fmt *fmt;
/* append sort keys to output field */
- perf_hpp__for_each_sort_list(fmt) {
- if (!list_empty(&fmt->list))
- continue;
-
- /*
- * sort entry fields are dynamically created,
- * so they can share a same sort key even though
- * the list is empty.
- */
- if (perf_hpp__is_sort_entry(fmt)) {
- struct perf_hpp_fmt *pos;
+ perf_hpp_list__for_each_sort_list(list, fmt) {
+ struct perf_hpp_fmt *pos;
- perf_hpp__for_each_format(pos) {
- if (perf_hpp__same_sort_entry(pos, fmt))
- goto next;
- }
+ perf_hpp_list__for_each_format(list, pos) {
+ if (fmt_equal(fmt, pos))
+ goto next;
}
perf_hpp__column_register(fmt);
@@ -550,27 +568,17 @@ next:
}
}
-void perf_hpp__append_sort_keys(void)
+void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
struct perf_hpp_fmt *fmt;
/* append output fields to sort keys */
- perf_hpp__for_each_format(fmt) {
- if (!list_empty(&fmt->sort_list))
- continue;
-
- /*
- * sort entry fields are dynamically created,
- * so they can share a same sort key even though
- * the list is empty.
- */
- if (perf_hpp__is_sort_entry(fmt)) {
- struct perf_hpp_fmt *pos;
+ perf_hpp_list__for_each_format(list, fmt) {
+ struct perf_hpp_fmt *pos;
- perf_hpp__for_each_sort_list(pos) {
- if (perf_hpp__same_sort_entry(pos, fmt))
- goto next;
- }
+ perf_hpp_list__for_each_sort_list(list, pos) {
+ if (fmt_equal(fmt, pos))
+ goto next;
}
perf_hpp__register_sort_field(fmt);
@@ -579,20 +587,29 @@ next:
}
}
-void perf_hpp__reset_output_field(void)
+
+static void fmt_free(struct perf_hpp_fmt *fmt)
+{
+ if (fmt->free)
+ fmt->free(fmt);
+}
+
+void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
struct perf_hpp_fmt *fmt, *tmp;
/* reset output fields */
- perf_hpp__for_each_format_safe(fmt, tmp) {
+ perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
list_del_init(&fmt->list);
list_del_init(&fmt->sort_list);
+ fmt_free(fmt);
}
/* reset sort keys */
- perf_hpp__for_each_sort_list_safe(fmt, tmp) {
+ perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
list_del_init(&fmt->list);
list_del_init(&fmt->sort_list);
+ fmt_free(fmt);
}
}
@@ -606,7 +623,7 @@ unsigned int hists__sort_list_width(struct hists *hists)
bool first = true;
struct perf_hpp dummy_hpp;
- perf_hpp__for_each_format(fmt) {
+ hists__for_each_format(hists, fmt) {
if (perf_hpp__should_skip(fmt, hists))
continue;
@@ -624,22 +641,39 @@ unsigned int hists__sort_list_width(struct hists *hists)
return ret;
}
-void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
+unsigned int hists__overhead_width(struct hists *hists)
{
- int idx;
-
- if (perf_hpp__is_sort_entry(fmt))
- return perf_hpp__reset_sort_width(fmt, hists);
+ struct perf_hpp_fmt *fmt;
+ int ret = 0;
+ bool first = true;
+ struct perf_hpp dummy_hpp;
- for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) {
- if (fmt == &perf_hpp__format[idx])
+ hists__for_each_format(hists, fmt) {
+ if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
break;
+
+ if (first)
+ first = false;
+ else
+ ret += 2;
+
+ ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
}
- if (idx == PERF_HPP__MAX_INDEX)
+ return ret;
+}
+
+void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
+{
+ if (perf_hpp__is_sort_entry(fmt))
+ return perf_hpp__reset_sort_width(fmt, hists);
+
+ if (perf_hpp__is_dynamic_entry(fmt))
return;
- switch (idx) {
+ BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);
+
+ switch (fmt->idx) {
case PERF_HPP__OVERHEAD:
case PERF_HPP__OVERHEAD_SYS:
case PERF_HPP__OVERHEAD_US:
@@ -667,7 +701,7 @@ void perf_hpp__set_user_width(const char *width_list_str)
struct perf_hpp_fmt *fmt;
const char *ptr = width_list_str;
- perf_hpp__for_each_format(fmt) {
+ perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
char *p;
int len = strtol(ptr, &p, 10);
@@ -679,3 +713,71 @@ void perf_hpp__set_user_width(const char *width_list_str)
break;
}
}
+
+static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
+{
+ struct perf_hpp_list_node *node = NULL;
+ struct perf_hpp_fmt *fmt_copy;
+ bool found = false;
+ bool skip = perf_hpp__should_skip(fmt, hists);
+
+ list_for_each_entry(node, &hists->hpp_formats, list) {
+ if (node->level == fmt->level) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ node = malloc(sizeof(*node));
+ if (node == NULL)
+ return -1;
+
+ node->skip = skip;
+ node->level = fmt->level;
+ perf_hpp_list__init(&node->hpp);
+
+ hists->nr_hpp_node++;
+ list_add_tail(&node->list, &hists->hpp_formats);
+ }
+
+ fmt_copy = perf_hpp_fmt__dup(fmt);
+ if (fmt_copy == NULL)
+ return -1;
+
+ if (!skip)
+ node->skip = false;
+
+ list_add_tail(&fmt_copy->list, &node->hpp.fields);
+ list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);
+
+ return 0;
+}
+
+int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
+ struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ struct perf_hpp_fmt *fmt;
+ struct hists *hists;
+ int ret;
+
+ if (!symbol_conf.report_hierarchy)
+ return 0;
+
+ evlist__for_each(evlist, evsel) {
+ hists = evsel__hists(evsel);
+
+ perf_hpp_list__for_each_sort_list(list, fmt) {
+ if (perf_hpp__is_dynamic_entry(fmt) &&
+ !perf_hpp__defined_dynamic_entry(fmt, hists))
+ continue;
+
+ ret = add_hierarchy_fmt(hists, fmt);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
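
The ->equal() callback wired into perf_hpp__format[] above lets perf_hpp__setup_output_field() and perf_hpp__append_sort_keys() de-duplicate formats without the old index search. A minimal standalone sketch of that dedup, with hypothetical indices and a simplified array in place of the linked lists:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Sketch of ->equal() based de-duplication: a sort key is appended to
 * the output fields only if no existing field compares equal. */
struct fmt {
	int idx;
	bool (*equal)(const struct fmt *a, const struct fmt *b);
};

static bool idx_equal(const struct fmt *a, const struct fmt *b)
{
	return a->idx == b->idx;
}

static bool fmt_equal(const struct fmt *a, const struct fmt *b)
{
	return a->equal && a->equal(a, b);
}

int main(void)
{
	struct fmt fields[8] = { { 0, idx_equal }, { 6, idx_equal } };
	int nr_fields = 2;
	struct fmt sorts[] = { { 6, idx_equal }, { 7, idx_equal } };

	for (size_t i = 0; i < sizeof(sorts) / sizeof(sorts[0]); i++) {
		bool found = false;

		for (int j = 0; j < nr_fields; j++) {
			if (fmt_equal(&sorts[i], &fields[j])) {
				found = true;
				break;
			}
		}
		if (!found)	/* append the missing sort key */
			fields[nr_fields++] = sorts[i];
	}
	printf("nr_fields = %d\n", nr_fields);	/* 3: idx 7 was appended */
	return 0;
}
```
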
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 387110d50b00..7aff5acf3265 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -165,8 +165,28 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
return ret;
}
+/*
+ * If there is a single callchain root, don't bother printing
+ * its percentage (100% in fractal mode and the same percentage
+ * as the hist in graph mode). This also avoids an extra column level.
+ *
+ * However, when a percent limit is applied, a single callchain
+ * node can have a different (non-100% in fractal mode) percentage.
+ */
+static bool need_percent_display(struct rb_node *node, u64 parent_samples)
+{
+ struct callchain_node *cnode;
+
+ if (rb_next(node))
+ return true;
+
+ cnode = rb_entry(node, struct callchain_node, rb_node);
+ return callchain_cumul_hits(cnode) != parent_samples;
+}
+
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
- u64 total_samples, int left_margin)
+ u64 total_samples, u64 parent_samples,
+ int left_margin)
{
struct callchain_node *cnode;
struct callchain_list *chain;
@@ -177,13 +197,8 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
int ret = 0;
char bf[1024];
- /*
- * If have one single callchain root, don't bother printing
- * its percentage (100 % in fractal mode and the same percentage
- * than the hist in graph mode). This also avoid one level of column.
- */
node = rb_first(root);
- if (node && !rb_next(node)) {
+ if (node && !need_percent_display(node, parent_samples)) {
cnode = rb_entry(node, struct callchain_node, rb_node);
list_for_each_entry(chain, &cnode->val, list) {
/*
@@ -213,9 +228,15 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
root = &cnode->rb_root;
}
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ total_samples = parent_samples;
+
ret += __callchain__fprintf_graph(fp, root, total_samples,
1, 1, left_margin);
- ret += fprintf(fp, "\n");
+ if (ret) {
+ /* do not add a blank line if it printed nothing */
+ ret += fprintf(fp, "\n");
+ }
return ret;
}
@@ -323,16 +344,19 @@ static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
u64 total_samples, int left_margin,
FILE *fp)
{
+ u64 parent_samples = he->stat.period;
+
+ if (symbol_conf.cumulate_callchain)
+ parent_samples = he->stat_acc->period;
+
switch (callchain_param.mode) {
case CHAIN_GRAPH_REL:
- return callchain__fprintf_graph(fp, &he->sorted_chain,
- symbol_conf.cumulate_callchain ?
- he->stat_acc->period : he->stat.period,
- left_margin);
+ return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
+ parent_samples, left_margin);
break;
case CHAIN_GRAPH_ABS:
return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
- left_margin);
+ parent_samples, left_margin);
break;
case CHAIN_FLAT:
return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
@@ -349,45 +373,66 @@ static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
return 0;
}
-static size_t hist_entry__callchain_fprintf(struct hist_entry *he,
- struct hists *hists,
- FILE *fp)
+static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
- int left_margin = 0;
- u64 total_period = hists->stats.total_period;
+ const char *sep = symbol_conf.field_sep;
+ struct perf_hpp_fmt *fmt;
+ char *start = hpp->buf;
+ int ret;
+ bool first = true;
- if (field_order == NULL && (sort_order == NULL ||
- !prefixcmp(sort_order, "comm"))) {
- struct perf_hpp_fmt *fmt;
+ if (symbol_conf.exclude_other && !he->parent)
+ return 0;
- perf_hpp__for_each_format(fmt) {
- if (!perf_hpp__is_sort_entry(fmt))
- continue;
+ hists__for_each_format(he->hists, fmt) {
+ if (perf_hpp__should_skip(fmt, he->hists))
+ continue;
- /* must be 'comm' sort entry */
- left_margin = fmt->width(fmt, NULL, hists_to_evsel(hists));
- left_margin -= thread__comm_len(he->thread);
- break;
- }
+ /*
+ * If there's no field_sep, we still need
+ * to display initial ' '.
+ */
+ if (!sep || !first) {
+ ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
+ advance_hpp(hpp, ret);
+ } else
+ first = false;
+
+ if (perf_hpp__use_color() && fmt->color)
+ ret = fmt->color(fmt, hpp, he);
+ else
+ ret = fmt->entry(fmt, hpp, he);
+
+ ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
+ advance_hpp(hpp, ret);
}
- return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
+
+ return hpp->buf - start;
}
-static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
+static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
+ struct perf_hpp *hpp,
+ struct hists *hists,
+ FILE *fp)
{
const char *sep = symbol_conf.field_sep;
struct perf_hpp_fmt *fmt;
- char *start = hpp->buf;
- int ret;
+ struct perf_hpp_list_node *fmt_node;
+ char *buf = hpp->buf;
+ size_t size = hpp->size;
+ int ret, printed = 0;
bool first = true;
if (symbol_conf.exclude_other && !he->parent)
return 0;
- perf_hpp__for_each_format(fmt) {
- if (perf_hpp__should_skip(fmt, he->hists))
- continue;
+ ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
+ advance_hpp(hpp, ret);
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
/*
* If there's no field_sep, we still need
* to display initial ' '.
@@ -403,10 +448,47 @@ static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
else
ret = fmt->entry(fmt, hpp, he);
+ ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
advance_hpp(hpp, ret);
}
- return hpp->buf - start;
+ if (!sep)
+ ret = scnprintf(hpp->buf, hpp->size, "%*s",
+ (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
+ advance_hpp(hpp, ret);
+
+ printed += fprintf(fp, "%s", buf);
+
+ perf_hpp_list__for_each_format(he->hpp_list, fmt) {
+ hpp->buf = buf;
+ hpp->size = size;
+
+ /*
+ * No need to call hist_entry__snprintf_alignment() since this
+ * fmt is always the last column in the hierarchy mode.
+ */
+ if (perf_hpp__use_color() && fmt->color)
+ fmt->color(fmt, hpp, he);
+ else
+ fmt->entry(fmt, hpp, he);
+
+ /*
+		 * dynamic entries are right-aligned but we want them
+		 * left-aligned in hierarchy mode
+ */
+ printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
+ }
+ printed += putc('\n', fp);
+
+ if (symbol_conf.use_callchain && he->leaf) {
+ u64 total = hists__total_period(hists);
+
+ printed += hist_entry_callchain__fprintf(he, total, 0, fp);
+ goto out;
+ }
+
+out:
+ return printed;
}
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
@@ -418,24 +500,134 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
.buf = bf,
.size = size,
};
+ u64 total_period = hists->stats.total_period;
if (size == 0 || size > bfsz)
size = hpp.size = bfsz;
+ if (symbol_conf.report_hierarchy)
+ return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
+
hist_entry__snprintf(he, &hpp);
ret = fprintf(fp, "%s\n", bf);
if (symbol_conf.use_callchain)
- ret += hist_entry__callchain_fprintf(he, hists, fp);
+ ret += hist_entry_callchain__fprintf(he, total_period, 0, fp);
return ret;
}
+static int print_hierarchy_indent(const char *sep, int indent,
+ const char *line, FILE *fp)
+{
+ if (sep != NULL || indent < 2)
+ return 0;
+
+ return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
+}
+
+static int print_hierarchy_header(struct hists *hists, struct perf_hpp *hpp,
+ const char *sep, FILE *fp)
+{
+ bool first_node, first_col;
+ int indent;
+ int depth;
+ unsigned width = 0;
+ unsigned header_width = 0;
+ struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
+
+ indent = hists->nr_hpp_node;
+
+ /* preserve max indent depth for column headers */
+ print_hierarchy_indent(sep, indent, spaces, fp);
+
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ fmt->header(fmt, hpp, hists_to_evsel(hists));
+ fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
+ }
+
+ /* combine sort headers with ' / ' */
+ first_node = true;
+ list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
+ if (!first_node)
+ header_width += fprintf(fp, " / ");
+ first_node = false;
+
+ first_col = true;
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ if (perf_hpp__should_skip(fmt, hists))
+ continue;
+
+ if (!first_col)
+ header_width += fprintf(fp, "+");
+ first_col = false;
+
+ fmt->header(fmt, hpp, hists_to_evsel(hists));
+ rtrim(hpp->buf);
+
+ header_width += fprintf(fp, "%s", ltrim(hpp->buf));
+ }
+ }
+
+ fprintf(fp, "\n# ");
+
+ /* preserve max indent depth for initial dots */
+ print_hierarchy_indent(sep, indent, dots, fp);
+
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+
+ first_col = true;
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ if (!first_col)
+ fprintf(fp, "%s", sep ?: "..");
+ first_col = false;
+
+ width = fmt->width(fmt, hpp, hists_to_evsel(hists));
+ fprintf(fp, "%.*s", width, dots);
+ }
+
+ depth = 0;
+ list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
+ first_col = true;
+ width = depth * HIERARCHY_INDENT;
+
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ if (perf_hpp__should_skip(fmt, hists))
+ continue;
+
+ if (!first_col)
+				width++; /* for '+' sign between column headers */
+ first_col = false;
+
+ width += fmt->width(fmt, hpp, hists_to_evsel(hists));
+ }
+
+ if (width > header_width)
+ header_width = width;
+
+ depth++;
+ }
+
+ fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);
+
+ fprintf(fp, "\n#\n");
+
+ return 2;
+}
+
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
int max_cols, float min_pcnt, FILE *fp)
{
struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
struct rb_node *nd;
size_t ret = 0;
unsigned int width;
@@ -449,10 +641,11 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
bool first = true;
size_t linesz;
char *line = NULL;
+ unsigned indent;
init_rem_hits();
- perf_hpp__for_each_format(fmt)
+ hists__for_each_format(hists, fmt)
perf_hpp__reset_width(fmt, hists);
if (symbol_conf.col_width_list_str)
@@ -463,7 +656,16 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
fprintf(fp, "# ");
- perf_hpp__for_each_format(fmt) {
+ if (symbol_conf.report_hierarchy) {
+ list_for_each_entry(fmt_node, &hists->hpp_formats, list) {
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt)
+ perf_hpp__reset_width(fmt, hists);
+ }
+ nr_rows += print_hierarchy_header(hists, &dummy_hpp, sep, fp);
+ goto print_entries;
+ }
+
+ hists__for_each_format(hists, fmt) {
if (perf_hpp__should_skip(fmt, hists))
continue;
@@ -487,7 +689,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
fprintf(fp, "# ");
- perf_hpp__for_each_format(fmt) {
+ hists__for_each_format(hists, fmt) {
unsigned int i;
if (perf_hpp__should_skip(fmt, hists))
@@ -520,7 +722,9 @@ print_entries:
goto out;
}
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ indent = hists__overhead_width(hists) + 4;
+
+ for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
float percent;
@@ -536,6 +740,20 @@ print_entries:
if (max_rows && ++nr_rows >= max_rows)
break;
+ /*
+ * If all children are filtered out or percent-limited,
+ * display "no entry >= x.xx%" message.
+ */
+ if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
+ int depth = hists->nr_hpp_node + h->depth + 1;
+
+ print_hierarchy_indent(sep, depth, spaces, fp);
+ fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
+
+ if (max_rows && ++nr_rows >= max_rows)
+ break;
+ }
+
if (h->ms.map == NULL && verbose > 1) {
__map_groups__fprintf_maps(h->thread->mg,
MAP__FUNCTION, fp);
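
The need_percent_display() helper introduced above decides when a lone callchain root may omit its percentage: only while it still covers all of the parent's samples, an invariant a percent limit can break. A minimal standalone sketch of that rule, with a hypothetical node type standing in for struct callchain_node:

```c
#include <stdbool.h>
#include <stdio.h>

/* Sketch of the need_percent_display() rule: show the percentage if
 * there is more than one root, or if the lone root no longer covers
 * all of the parent's samples (e.g. a limited child was dropped). */
struct node {
	struct node *next;		/* sibling; NULL if single root */
	unsigned long long cumul_hits;	/* cumulative hits of this root */
};

static bool need_percent_display(struct node *node,
				 unsigned long long parent_samples)
{
	if (node->next)		/* more than one callchain root */
		return true;
	return node->cumul_hits != parent_samples;
}

int main(void)
{
	struct node lone = { .next = NULL, .cumul_hits = 80 };

	/* a limit dropped 20 of 100 samples, so the percent is shown */
	printf("%s\n", need_percent_display(&lone, 100) ? "show" : "hide");
	return 0;
}
```
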
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 5eec53a3f4ac..eea25e2424e9 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -82,6 +82,7 @@ libperf-y += parse-branch-options.o
libperf-y += parse-regs-options.o
libperf-y += term.o
libperf-y += help-unknown-cmd.o
+libperf-y += mem-events.o
libperf-$(CONFIG_LIBBPF) += bpf-loader.o
libperf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
@@ -105,8 +106,17 @@ libperf-y += scripting-engines/
libperf-$(CONFIG_ZLIB) += zlib.o
libperf-$(CONFIG_LZMA) += lzma.o
+libperf-y += demangle-java.o
+
+ifdef CONFIG_JITDUMP
+libperf-$(CONFIG_LIBELF) += jitdump.o
+libperf-$(CONFIG_LIBELF) += genelf.o
+libperf-$(CONFIG_LIBELF) += genelf_debug.o
+endif
CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+# avoid compiler warnings in 32-bit mode
+CFLAGS_genelf_debug.o += -Wno-packed
$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
$(call rule_mkdir)
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 360fda01f3b0..ec164fe70718 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -478,10 +478,11 @@ void auxtrace_heap__pop(struct auxtrace_heap *heap)
heap_array[last].ordinal);
}
-size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr)
+size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
+ struct perf_evlist *evlist)
{
if (itr)
- return itr->info_priv_size(itr);
+ return itr->info_priv_size(itr, evlist);
return 0;
}
@@ -852,7 +853,7 @@ int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
int err;
pr_debug2("Synthesizing auxtrace information\n");
- priv_size = auxtrace_record__info_priv_size(itr);
+ priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
if (!ev)
return -ENOMEM;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index b86f90db1352..e5a8e2d4f2af 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -293,7 +293,8 @@ struct auxtrace_record {
int (*recording_options)(struct auxtrace_record *itr,
struct perf_evlist *evlist,
struct record_opts *opts);
- size_t (*info_priv_size)(struct auxtrace_record *itr);
+ size_t (*info_priv_size)(struct auxtrace_record *itr,
+ struct perf_evlist *evlist);
int (*info_fill)(struct auxtrace_record *itr,
struct perf_session *session,
struct auxtrace_info_event *auxtrace_info,
@@ -429,7 +430,8 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
int auxtrace_record__options(struct auxtrace_record *itr,
struct perf_evlist *evlist,
struct record_opts *opts);
-size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr);
+size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
+ struct perf_evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
struct perf_session *session,
struct auxtrace_info_event *auxtrace_info,
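
The signature change above threads the evlist through to the info_priv_size() callback so implementations can size their private data according to the events being recorded. A minimal standalone sketch of that delegation pattern, with hypothetical stand-in types (not the real auxtrace structures):

```c
#include <stddef.h>
#include <stdio.h>

/* Sketch of the callback-signature change: the wrapper forwards an
 * extra evlist argument so the private-data size can depend on the
 * number of events. */
struct evlist { int nr_entries; };

struct record {
	size_t (*info_priv_size)(struct record *rec, struct evlist *evlist);
};

static size_t info_priv_size(struct record *rec, struct evlist *evlist)
{
	(void)rec;
	return 64 + 8 * evlist->nr_entries;	/* scales with events */
}

/* Wrapper mirrors auxtrace_record__info_priv_size(): delegate if an
 * implementation is registered, otherwise report zero. */
static size_t record__info_priv_size(struct record *rec,
				     struct evlist *evlist)
{
	if (rec)
		return rec->info_priv_size(rec, evlist);
	return 0;
}

int main(void)
{
	struct record rec = { .info_priv_size = info_priv_size };
	struct evlist evlist = { .nr_entries = 3 };

	printf("%zu\n", record__info_priv_size(&rec, &evlist));
	return 0;
}
```
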
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 540a7efa657e..0967ce601931 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -7,6 +7,7 @@
#include <linux/bpf.h>
#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
#include <linux/err.h>
#include <linux/string.h>
#include "perf.h"
@@ -16,6 +17,7 @@
#include "llvm-utils.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
+#include "parse-events.h"
#include "llvm-utils.h"
#define DEFINE_PRINT_FN(name, level) \
@@ -108,8 +110,8 @@ void bpf__clear(void)
}
static void
-bpf_prog_priv__clear(struct bpf_program *prog __maybe_unused,
- void *_priv)
+clear_prog_priv(struct bpf_program *prog __maybe_unused,
+ void *_priv)
{
struct bpf_prog_priv *priv = _priv;
@@ -337,7 +339,7 @@ config_bpf_program(struct bpf_program *prog)
}
pr_debug("bpf: config '%s' is ok\n", config_str);
- err = bpf_program__set_private(prog, priv, bpf_prog_priv__clear);
+ err = bpf_program__set_private(prog, priv, clear_prog_priv);
if (err) {
pr_debug("Failed to set priv for program '%s'\n", config_str);
goto errout;
@@ -739,6 +741,682 @@ int bpf__foreach_tev(struct bpf_object *obj,
return 0;
}
+enum bpf_map_op_type {
+ BPF_MAP_OP_SET_VALUE,
+ BPF_MAP_OP_SET_EVSEL,
+};
+
+enum bpf_map_key_type {
+ BPF_MAP_KEY_ALL,
+ BPF_MAP_KEY_RANGES,
+};
+
+struct bpf_map_op {
+ struct list_head list;
+ enum bpf_map_op_type op_type;
+ enum bpf_map_key_type key_type;
+ union {
+ struct parse_events_array array;
+ } k;
+ union {
+ u64 value;
+ struct perf_evsel *evsel;
+ } v;
+};
+
+struct bpf_map_priv {
+ struct list_head ops_list;
+};
+
+static void
+bpf_map_op__delete(struct bpf_map_op *op)
+{
+ if (!list_empty(&op->list))
+ list_del(&op->list);
+ if (op->key_type == BPF_MAP_KEY_RANGES)
+ parse_events__clear_array(&op->k.array);
+ free(op);
+}
+
+static void
+bpf_map_priv__purge(struct bpf_map_priv *priv)
+{
+ struct bpf_map_op *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
+ list_del_init(&pos->list);
+ bpf_map_op__delete(pos);
+ }
+}
+
+static void
+bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
+ void *_priv)
+{
+ struct bpf_map_priv *priv = _priv;
+
+ bpf_map_priv__purge(priv);
+ free(priv);
+}
+
+static int
+bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
+{
+ op->key_type = BPF_MAP_KEY_ALL;
+ if (!term)
+ return 0;
+
+ if (term->array.nr_ranges) {
+ size_t memsz = term->array.nr_ranges *
+ sizeof(op->k.array.ranges[0]);
+
+ op->k.array.ranges = memdup(term->array.ranges, memsz);
+ if (!op->k.array.ranges) {
+ pr_debug("No enough memory to alloc indices for map\n");
+ return -ENOMEM;
+ }
+ op->key_type = BPF_MAP_KEY_RANGES;
+ op->k.array.nr_ranges = term->array.nr_ranges;
+ }
+ return 0;
+}
+
+static struct bpf_map_op *
+bpf_map_op__new(struct parse_events_term *term)
+{
+ struct bpf_map_op *op;
+ int err;
+
+ op = zalloc(sizeof(*op));
+ if (!op) {
+ pr_debug("Failed to alloc bpf_map_op\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&op->list);
+
+ err = bpf_map_op_setkey(op, term);
+ if (err) {
+ free(op);
+ return ERR_PTR(err);
+ }
+ return op;
+}
+
+static int
+bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
+{
+ struct bpf_map_priv *priv;
+ const char *map_name;
+ int err;
+
+ map_name = bpf_map__get_name(map);
+ err = bpf_map__get_private(map, (void **)&priv);
+ if (err) {
+ pr_debug("Failed to get private from map %s\n", map_name);
+ return err;
+ }
+
+ if (!priv) {
+ priv = zalloc(sizeof(*priv));
+ if (!priv) {
+ pr_debug("No enough memory to alloc map private\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&priv->ops_list);
+
+ if (bpf_map__set_private(map, priv, bpf_map_priv__clear)) {
+ free(priv);
+ return -BPF_LOADER_ERRNO__INTERNAL;
+ }
+ }
+
+ list_add_tail(&op->list, &priv->ops_list);
+ return 0;
+}
+
+static struct bpf_map_op *
+bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
+{
+ struct bpf_map_op *op;
+ int err;
+
+ op = bpf_map_op__new(term);
+ if (IS_ERR(op))
+ return op;
+
+ err = bpf_map__add_op(map, op);
+ if (err) {
+ bpf_map_op__delete(op);
+ return ERR_PTR(err);
+ }
+ return op;
+}
+
+static int
+__bpf_map__config_value(struct bpf_map *map,
+ struct parse_events_term *term)
+{
+ struct bpf_map_def def;
+ struct bpf_map_op *op;
+ const char *map_name;
+ int err;
+
+ map_name = bpf_map__get_name(map);
+
+ err = bpf_map__get_def(map, &def);
+ if (err) {
+ pr_debug("Unable to get map definition from '%s'\n",
+ map_name);
+ return -BPF_LOADER_ERRNO__INTERNAL;
+ }
+
+ if (def.type != BPF_MAP_TYPE_ARRAY) {
+ pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
+ map_name);
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
+ }
+ if (def.key_size < sizeof(unsigned int)) {
+ pr_debug("Map %s has incorrect key size\n", map_name);
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
+ }
+ switch (def.value_size) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ pr_debug("Map %s has incorrect value size\n", map_name);
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
+ }
+
+ op = bpf_map__add_newop(map, term);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
+ op->op_type = BPF_MAP_OP_SET_VALUE;
+ op->v.value = term->val.num;
+ return 0;
+}
+
+static int
+bpf_map__config_value(struct bpf_map *map,
+ struct parse_events_term *term,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ if (!term->err_val) {
+ pr_debug("Config value not set\n");
+ return -BPF_LOADER_ERRNO__OBJCONF_CONF;
+ }
+
+ if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
+ pr_debug("ERROR: wrong value type for 'value'\n");
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
+ }
+
+ return __bpf_map__config_value(map, term);
+}
+
+static int
+__bpf_map__config_event(struct bpf_map *map,
+ struct parse_events_term *term,
+ struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ struct bpf_map_def def;
+ struct bpf_map_op *op;
+ const char *map_name;
+ int err;
+
+ map_name = bpf_map__get_name(map);
+ evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
+ if (!evsel) {
+ pr_debug("Event (for '%s') '%s' doesn't exist\n",
+ map_name, term->val.str);
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
+ }
+
+ err = bpf_map__get_def(map, &def);
+ if (err) {
+ pr_debug("Unable to get map definition from '%s'\n",
+ map_name);
+ return err;
+ }
+
+ /*
+ * No need to check key_size and value_size:
+	 * the kernel has already checked them.
+ */
+ if (def.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
+ pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
+ map_name);
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
+ }
+
+ op = bpf_map__add_newop(map, term);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
+ op->op_type = BPF_MAP_OP_SET_EVSEL;
+ op->v.evsel = evsel;
+ return 0;
+}
+
+static int
+bpf_map__config_event(struct bpf_map *map,
+ struct parse_events_term *term,
+ struct perf_evlist *evlist)
+{
+ if (!term->err_val) {
+ pr_debug("Config value not set\n");
+ return -BPF_LOADER_ERRNO__OBJCONF_CONF;
+ }
+
+ if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
+ pr_debug("ERROR: wrong value type for 'event'\n");
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
+ }
+
+ return __bpf_map__config_event(map, term, evlist);
+}
+
+struct bpf_obj_config__map_func {
+ const char *config_opt;
+ int (*config_func)(struct bpf_map *, struct parse_events_term *,
+ struct perf_evlist *);
+};
+
+struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
+ {"value", bpf_map__config_value},
+ {"event", bpf_map__config_event},
+};
+
+static int
+config_map_indices_range_check(struct parse_events_term *term,
+ struct bpf_map *map,
+ const char *map_name)
+{
+ struct parse_events_array *array = &term->array;
+ struct bpf_map_def def;
+ unsigned int i;
+ int err;
+
+ if (!array->nr_ranges)
+ return 0;
+ if (!array->ranges) {
+ pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
+ map_name, (int)array->nr_ranges);
+ return -BPF_LOADER_ERRNO__INTERNAL;
+ }
+
+ err = bpf_map__get_def(map, &def);
+ if (err) {
+ pr_debug("ERROR: Unable to get map definition from '%s'\n",
+ map_name);
+ return -BPF_LOADER_ERRNO__INTERNAL;
+ }
+
+ for (i = 0; i < array->nr_ranges; i++) {
+ unsigned int start = array->ranges[i].start;
+ size_t length = array->ranges[i].length;
+ unsigned int idx = start + length - 1;
+
+ if (idx >= def.max_entries) {
+ pr_debug("ERROR: index %d too large\n", idx);
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
+ }
+ }
+ return 0;
+}
+
+static int
+bpf__obj_config_map(struct bpf_object *obj,
+ struct parse_events_term *term,
+ struct perf_evlist *evlist,
+ int *key_scan_pos)
+{
+ /* key is "map:<mapname>.<config opt>" */
+ char *map_name = strdup(term->config + sizeof("map:") - 1);
+ struct bpf_map *map;
+ int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
+ char *map_opt;
+ size_t i;
+
+ if (!map_name)
+ return -ENOMEM;
+
+ map_opt = strchr(map_name, '.');
+ if (!map_opt) {
+ pr_debug("ERROR: Invalid map config: %s\n", map_name);
+ goto out;
+ }
+
+ *map_opt++ = '\0';
+ if (*map_opt == '\0') {
+ pr_debug("ERROR: Invalid map option: %s\n", term->config);
+ goto out;
+ }
+
+ map = bpf_object__get_map_by_name(obj, map_name);
+ if (!map) {
+ pr_debug("ERROR: Map %s doesn't exist\n", map_name);
+ err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
+ goto out;
+ }
+
+ *key_scan_pos += strlen(map_opt);
+ err = config_map_indices_range_check(term, map, map_name);
+ if (err)
+ goto out;
+ *key_scan_pos -= strlen(map_opt);
+
+ for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
+ struct bpf_obj_config__map_func *func =
+ &bpf_obj_config__map_funcs[i];
+
+ if (strcmp(map_opt, func->config_opt) == 0) {
+ err = func->config_func(map, term, evlist);
+ goto out;
+ }
+ }
+
+ pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
+ err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
+out:
+ free(map_name);
+ if (!err)
+ key_scan_pos += strlen(map_opt);
+ return err;
+}
+
+int bpf__config_obj(struct bpf_object *obj,
+ struct parse_events_term *term,
+ struct perf_evlist *evlist,
+ int *error_pos)
+{
+ int key_scan_pos = 0;
+ int err;
+
+ if (!obj || !term || !term->config)
+ return -EINVAL;
+
+ if (!prefixcmp(term->config, "map:")) {
+ key_scan_pos = sizeof("map:") - 1;
+ err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
+ goto out;
+ }
+ err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
+out:
+ if (error_pos)
+ *error_pos = key_scan_pos;
+ return err;
+
+}
+
+typedef int (*map_config_func_t)(const char *name, int map_fd,
+ struct bpf_map_def *pdef,
+ struct bpf_map_op *op,
+ void *pkey, void *arg);
+
+static int
+foreach_key_array_all(map_config_func_t func,
+ void *arg, const char *name,
+ int map_fd, struct bpf_map_def *pdef,
+ struct bpf_map_op *op)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < pdef->max_entries; i++) {
+ err = func(name, map_fd, pdef, op, &i, arg);
+ if (err) {
+ pr_debug("ERROR: failed to insert value to %s[%u]\n",
+ name, i);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int
+foreach_key_array_ranges(map_config_func_t func, void *arg,
+ const char *name, int map_fd,
+ struct bpf_map_def *pdef,
+ struct bpf_map_op *op)
+{
+ unsigned int i, j;
+ int err;
+
+ for (i = 0; i < op->k.array.nr_ranges; i++) {
+ unsigned int start = op->k.array.ranges[i].start;
+ size_t length = op->k.array.ranges[i].length;
+
+ for (j = 0; j < length; j++) {
+ unsigned int idx = start + j;
+
+ err = func(name, map_fd, pdef, op, &idx, arg);
+ if (err) {
+ pr_debug("ERROR: failed to insert value to %s[%u]\n",
+ name, idx);
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+static int
+bpf_map_config_foreach_key(struct bpf_map *map,
+ map_config_func_t func,
+ void *arg)
+{
+ int err, map_fd;
+ const char *name;
+ struct bpf_map_op *op;
+ struct bpf_map_def def;
+ struct bpf_map_priv *priv;
+
+ name = bpf_map__get_name(map);
+
+ err = bpf_map__get_private(map, (void **)&priv);
+ if (err) {
+ pr_debug("ERROR: failed to get private from map %s\n", name);
+ return -BPF_LOADER_ERRNO__INTERNAL;
+ }
+ if (!priv || list_empty(&priv->ops_list)) {
+ pr_debug("INFO: nothing to config for map %s\n", name);
+ return 0;
+ }
+
+ err = bpf_map__get_def(map, &def);
+ if (err) {
+ pr_debug("ERROR: failed to get definition from map %s\n", name);
+ return -BPF_LOADER_ERRNO__INTERNAL;
+ }
+ map_fd = bpf_map__get_fd(map);
+ if (map_fd < 0) {
+ pr_debug("ERROR: failed to get fd from map %s\n", name);
+ return map_fd;
+ }
+
+ list_for_each_entry(op, &priv->ops_list, list) {
+ switch (def.type) {
+ case BPF_MAP_TYPE_ARRAY:
+ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+ switch (op->key_type) {
+ case BPF_MAP_KEY_ALL:
+ err = foreach_key_array_all(func, arg, name,
+ map_fd, &def, op);
+ break;
+ case BPF_MAP_KEY_RANGES:
+ err = foreach_key_array_ranges(func, arg, name,
+ map_fd, &def,
+ op);
+ break;
+ default:
+ pr_debug("ERROR: keytype for map '%s' invalid\n",
+ name);
+ return -BPF_LOADER_ERRNO__INTERNAL;
+ }
+ if (err)
+ return err;
+ break;
+ default:
+ pr_debug("ERROR: type of '%s' incorrect\n", name);
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
+ }
+ }
+
+ return 0;
+}
+
+static int
+apply_config_value_for_key(int map_fd, void *pkey,
+ size_t val_size, u64 val)
+{
+ int err = 0;
+
+ switch (val_size) {
+ case 1: {
+ u8 _val = (u8)(val);
+ err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
+ break;
+ }
+ case 2: {
+ u16 _val = (u16)(val);
+ err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
+ break;
+ }
+ case 4: {
+ u32 _val = (u32)(val);
+ err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
+ break;
+ }
+ case 8: {
+ err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
+ break;
+ }
+ default:
+ pr_debug("ERROR: invalid value size\n");
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
+ }
+ if (err && errno)
+ err = -errno;
+ return err;
+}
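
For illustration, a hypothetical map definition in the BPF object source; its value_size picks the case in the switch above (SEC() being the usual clang section-placement macro):

    /* hypothetical map; value_size = 4 dispatches to the u32 case above */
    struct bpf_map_def SEC("maps") channel = {
            .type        = BPF_MAP_TYPE_ARRAY,
            .key_size    = sizeof(unsigned int),
            .value_size  = sizeof(unsigned int),
            .max_entries = 16,
    };
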
+
+static int
+apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
+ struct perf_evsel *evsel)
+{
+ struct xyarray *xy = evsel->fd;
+ struct perf_event_attr *attr;
+ unsigned int key, events;
+ bool check_pass = false;
+ int *evt_fd;
+ int err;
+
+ if (!xy) {
+ pr_debug("ERROR: evsel not ready for map %s\n", name);
+ return -BPF_LOADER_ERRNO__INTERNAL;
+ }
+
+ if (xy->row_size / xy->entry_size != 1) {
+ pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
+ name);
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
+ }
+
+ attr = &evsel->attr;
+ if (attr->inherit) {
+ pr_debug("ERROR: Can't put inherit event into map %s\n", name);
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
+ }
+
+ if (perf_evsel__is_bpf_output(evsel))
+ check_pass = true;
+ if (attr->type == PERF_TYPE_RAW)
+ check_pass = true;
+ if (attr->type == PERF_TYPE_HARDWARE)
+ check_pass = true;
+ if (!check_pass) {
+ pr_debug("ERROR: Event type is wrong for map %s\n", name);
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
+ }
+
+ events = xy->entries / (xy->row_size / xy->entry_size);
+ key = *((unsigned int *)pkey);
+ if (key >= events) {
+ pr_debug("ERROR: there is no event %d for map %s\n",
+ key, name);
+ return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
+ }
+ evt_fd = xyarray__entry(xy, key, 0);
+ err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
+ if (err && errno)
+ err = -errno;
+ return err;
+}
+
+static int
+apply_obj_config_map_for_key(const char *name, int map_fd,
+ struct bpf_map_def *pdef,
+ struct bpf_map_op *op,
+ void *pkey, void *arg __maybe_unused)
+{
+ int err;
+
+ switch (op->op_type) {
+ case BPF_MAP_OP_SET_VALUE:
+ err = apply_config_value_for_key(map_fd, pkey,
+ pdef->value_size,
+ op->v.value);
+ break;
+ case BPF_MAP_OP_SET_EVSEL:
+ err = apply_config_evsel_for_key(name, map_fd, pkey,
+ op->v.evsel);
+ break;
+ default:
+ pr_debug("ERROR: unknown value type for '%s'\n", name);
+ err = -BPF_LOADER_ERRNO__INTERNAL;
+ }
+ return err;
+}
+
+static int
+apply_obj_config_map(struct bpf_map *map)
+{
+ return bpf_map_config_foreach_key(map,
+ apply_obj_config_map_for_key,
+ NULL);
+}
+
+static int
+apply_obj_config_object(struct bpf_object *obj)
+{
+ struct bpf_map *map;
+ int err;
+
+ bpf_map__for_each(map, obj) {
+ err = apply_obj_config_map(map);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int bpf__apply_obj_config(void)
+{
+ struct bpf_object *obj, *tmp;
+ int err;
+
+ bpf_object__for_each_safe(obj, tmp) {
+ err = apply_obj_config_object(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
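
bpf__apply_obj_config() walks every loaded object, so it takes no arguments. The BPF_MAP_OP_SET_EVSEL ops queued at parse time need real event fds, which only exist once the evlist has been opened; a rough sketch of the assumed ordering (not the literal perf-record code path):

    bpf__config_obj(obj, term, evlist, &err_pos); /* parse time: queue ops on map privs */
    perf_evlist__open(evlist);                    /* creates the event fds */
    err = bpf__apply_obj_config();                /* flush queued ops into the maps */
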
+
#define ERRNO_OFFSET(e) ((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c) ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
@@ -753,6 +1431,20 @@ static const char *bpf_loader_strerror_table[NR_ERRNO] = {
[ERRCODE_OFFSET(PROLOGUE)] = "Failed to generate prologue",
[ERRCODE_OFFSET(PROLOGUE2BIG)] = "Prologue too big for program",
[ERRCODE_OFFSET(PROLOGUEOOB)] = "Offset out of bound for prologue",
+ [ERRCODE_OFFSET(OBJCONF_OPT)] = "Invalid object config option",
+ [ERRCODE_OFFSET(OBJCONF_CONF)] = "Config value not set (missing '=')",
+ [ERRCODE_OFFSET(OBJCONF_MAP_OPT)] = "Invalid object map config option",
+ [ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)] = "Target map doesn't exist",
+ [ERRCODE_OFFSET(OBJCONF_MAP_VALUE)] = "Incorrect value type for map",
+ [ERRCODE_OFFSET(OBJCONF_MAP_TYPE)] = "Incorrect map type",
+ [ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)] = "Incorrect map key size",
+ [ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)] = "Incorrect map value size",
+ [ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)] = "Event not found for map setting",
+ [ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)] = "Invalid map size for event setting",
+ [ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)] = "Event dimension too large",
+ [ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)] = "Inherit event is not supported",
+ [ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)] = "Wrong event type for map",
+ [ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)] = "Index too large",
};
static int
@@ -872,3 +1564,29 @@ int bpf__strerror_load(struct bpf_object *obj,
bpf__strerror_end(buf, size);
return 0;
}
+
+int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
+ struct parse_events_term *term __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused,
+ int *error_pos __maybe_unused, int err,
+ char *buf, size_t size)
+{
+ bpf__strerror_head(err, buf, size);
+ bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
+ "Can't use this config term with this map type");
+ bpf__strerror_end(buf, size);
+ return 0;
+}
+
+int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
+{
+ bpf__strerror_head(err, buf, size);
+ bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
+ "Cannot set event to BPF map in multi-thread tracing");
+ bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
+ "%s (Hint: use -i to turn off inherit)", emsg);
+ bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
+ "Can only put raw, hardware and BPF output event into a BPF map");
+ bpf__strerror_end(buf, size);
+ return 0;
+}
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
index 6fdc0457e2b6..be4311944e3d 100644
--- a/tools/perf/util/bpf-loader.h
+++ b/tools/perf/util/bpf-loader.h
@@ -10,6 +10,7 @@
#include <string.h>
#include <bpf/libbpf.h>
#include "probe-event.h"
+#include "evlist.h"
#include "debug.h"
enum bpf_loader_errno {
@@ -24,10 +25,25 @@ enum bpf_loader_errno {
BPF_LOADER_ERRNO__PROLOGUE, /* Failed to generate prologue */
BPF_LOADER_ERRNO__PROLOGUE2BIG, /* Prologue too big for program */
BPF_LOADER_ERRNO__PROLOGUEOOB, /* Offset out of bound for prologue */
+ BPF_LOADER_ERRNO__OBJCONF_OPT, /* Invalid object config option */
+ BPF_LOADER_ERRNO__OBJCONF_CONF, /* Config value not set (missing '=') */
+ BPF_LOADER_ERRNO__OBJCONF_MAP_OPT, /* Invalid object map config option */
+ BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST, /* Target map doesn't exist */
+ BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE, /* Incorrect value type for map */
+ BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE, /* Incorrect map type */
+ BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE, /* Incorrect map key size */
+ BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE,/* Incorrect map value size */
+ BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT, /* Event not found for map setting */
+ BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE, /* Invalid map size for event setting */
+ BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM, /* Event dimension too large */
+ BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH, /* Inherit event is not supported */
+ BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE, /* Wrong event type for map */
+ BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG, /* Index too large */
__BPF_LOADER_ERRNO__END,
};
struct bpf_object;
+struct parse_events_term;
#define PERF_BPF_PROBE_GROUP "perf_bpf_probe"
typedef int (*bpf_prog_iter_callback_t)(struct probe_trace_event *tev,
@@ -53,6 +69,16 @@ int bpf__strerror_load(struct bpf_object *obj, int err,
char *buf, size_t size);
int bpf__foreach_tev(struct bpf_object *obj,
bpf_prog_iter_callback_t func, void *arg);
+
+int bpf__config_obj(struct bpf_object *obj, struct parse_events_term *term,
+ struct perf_evlist *evlist, int *error_pos);
+int bpf__strerror_config_obj(struct bpf_object *obj,
+ struct parse_events_term *term,
+ struct perf_evlist *evlist,
+ int *error_pos, int err, char *buf,
+ size_t size);
+int bpf__apply_obj_config(void);
+int bpf__strerror_apply_obj_config(int err, char *buf, size_t size);
#else
static inline struct bpf_object *
bpf__prepare_load(const char *filename __maybe_unused,
@@ -84,6 +110,21 @@ bpf__foreach_tev(struct bpf_object *obj __maybe_unused,
}
static inline int
+bpf__config_obj(struct bpf_object *obj __maybe_unused,
+ struct parse_events_term *term __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused,
+ int *error_pos __maybe_unused)
+{
+ return 0;
+}
+
+static inline int
+bpf__apply_obj_config(void)
+{
+ return 0;
+}
+
+static inline int
__bpf_strerror(char *buf, size_t size)
{
if (!size)
@@ -118,5 +159,23 @@ static inline int bpf__strerror_load(struct bpf_object *obj __maybe_unused,
{
return __bpf_strerror(buf, size);
}
+
+static inline int
+bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
+ struct parse_events_term *term __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused,
+ int *error_pos __maybe_unused,
+ int err __maybe_unused,
+ char *buf, size_t size)
+{
+ return __bpf_strerror(buf, size);
+}
+
+static inline int
+bpf__strerror_apply_obj_config(int err __maybe_unused,
+ char *buf, size_t size)
+{
+ return __bpf_strerror(buf, size);
+}
#endif
#endif
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 6a7e273a514a..f1479eeef7da 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -166,6 +166,50 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
return build_id__filename(build_id_hex, bf, size);
}
+bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size)
+{
+ char *id_name, *ch;
+ struct stat sb;
+
+ id_name = dso__build_id_filename(dso, bf, size);
+ if (!id_name)
+ goto err;
+ if (access(id_name, F_OK))
+ goto err;
+ if (lstat(id_name, &sb) == -1)
+ goto err;
+ if ((size_t)sb.st_size > size - 1)
+ goto err;
+ if (readlink(id_name, bf, size - 1) < 0)
+ goto err;
+
+ bf[sb.st_size] = '\0';
+
+ /*
+ * link should be:
+ * ../../lib/modules/4.4.0-rc4/kernel/net/ipv4/netfilter/nf_nat_ipv4.ko/a09fe3eb3147dafa4e3b31dbd6257e4d696bdc92
+ */
+ ch = strrchr(bf, '/');
+ if (!ch)
+ goto err;
+ if (ch - 3 < bf)
+ goto err;
+
+ return strncmp(".ko", ch - 3, 3) == 0;
+err:
+ /*
+ * If dso__build_id_filename() worked above, fetch id_name again,
+ * because id_name points into bf, which readlink() has since overwritten.
+ */
+ if (id_name)
+ id_name = dso__build_id_filename(dso, bf, size);
+ pr_err("Invalid build id: %s\n", id_name ? :
+ dso->long_name ? :
+ dso->short_name ? :
+ "[unknown]");
+ return false;
+}
+
#define dsos__for_each_with_build_id(pos, head) \
list_for_each_entry(pos, head, node) \
if (!pos->has_build_id) \
@@ -211,6 +255,7 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
dsos__for_each_with_build_id(pos, &machine->dsos.head) {
const char *name;
size_t name_len;
+ bool in_kernel = false;
if (!pos->hit)
continue;
@@ -227,8 +272,11 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
name_len = pos->long_name_len + 1;
}
+ in_kernel = pos->kernel ||
+ is_kernel_module(name,
+ PERF_RECORD_MISC_CPUMODE_UNKNOWN);
err = write_buildid(name, name_len, pos->build_id, machine->pid,
- pos->kernel ? kmisc : umisc, fd);
+ in_kernel ? kmisc : umisc, fd);
if (err)
break;
}
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 27a14a8a945b..64af3e20610d 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -16,6 +16,7 @@ int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id);
int filename__sprintf_build_id(const char *pathname, char *sbuild_id);
char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
+bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size);
int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct perf_evsel *evsel,
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 07b5d63947b1..3ca453f0c51f 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -23,6 +23,8 @@
#define PERF_TRACEFS_ENVIRONMENT "PERF_TRACEFS_DIR"
#define PERF_PAGER_ENVIRONMENT "PERF_PAGER"
+extern const char *config_exclusive_filename;
+
typedef int (*config_fn_t)(const char *, const char *, void *);
extern int perf_default_config(const char *, const char *, void *);
extern int perf_config(config_fn_t fn, void *);
@@ -31,6 +33,7 @@ extern u64 perf_config_u64(const char *, const char *);
extern int perf_config_bool(const char *, const char *);
extern int config_error_nonbool(const char *);
extern const char *perf_config_dirname(const char *, const char *);
+extern const char *perf_etc_perfconfig(void);
char *alias_lookup(const char *alias);
int split_cmdline(char *cmdline, const char ***argv);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 53c43eb9489e..24b4bd0d7754 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -416,7 +416,7 @@ create_child(struct callchain_node *parent, bool inherit_children)
/*
* Fill the node with callchain values
*/
-static void
+static int
fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
{
struct callchain_cursor_node *cursor_node;
@@ -433,7 +433,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
call = zalloc(sizeof(*call));
if (!call) {
perror("not enough memory for the code path tree");
- return;
+ return -1;
}
call->ip = cursor_node->ip;
call->ms.sym = cursor_node->sym;
@@ -443,6 +443,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
callchain_cursor_advance(cursor);
cursor_node = callchain_cursor_current(cursor);
}
+ return 0;
}
static struct callchain_node *
@@ -453,7 +454,19 @@ add_child(struct callchain_node *parent,
struct callchain_node *new;
new = create_child(parent, false);
- fill_node(new, cursor);
+ if (new == NULL)
+ return NULL;
+
+ if (fill_node(new, cursor) < 0) {
+ struct callchain_list *call, *tmp;
+
+ list_for_each_entry_safe(call, tmp, &new->val, list) {
+ list_del(&call->list);
+ free(call);
+ }
+ free(new);
+ return NULL;
+ }
new->children_hit = 0;
new->hit = period;
@@ -462,16 +475,32 @@ add_child(struct callchain_node *parent,
return new;
}
-static s64 match_chain(struct callchain_cursor_node *node,
- struct callchain_list *cnode)
+enum match_result {
+ MATCH_ERROR = -1,
+ MATCH_EQ,
+ MATCH_LT,
+ MATCH_GT,
+};
+
+static enum match_result match_chain(struct callchain_cursor_node *node,
+ struct callchain_list *cnode)
{
struct symbol *sym = node->sym;
+ u64 left, right;
if (cnode->ms.sym && sym &&
- callchain_param.key == CCKEY_FUNCTION)
- return cnode->ms.sym->start - sym->start;
- else
- return cnode->ip - node->ip;
+ callchain_param.key == CCKEY_FUNCTION) {
+ left = cnode->ms.sym->start;
+ right = sym->start;
+ } else {
+ left = cnode->ip;
+ right = node->ip;
+ }
+
+ if (left == right)
+ return MATCH_EQ;
+
+ return left > right ? MATCH_GT : MATCH_LT;
}
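
The tri-state result replaces a raw s64 subtraction, which mis-orders u64 operands more than 2^63 apart; for example:

    /* why "return left - right" as s64 was unsafe for u64 operands: */
    u64 left = 1, right = 0x8000000000000002ULL;
    /* (s64)(left - right) == 0x7fffffffffffffff > 0, yet left < right */
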
/*
@@ -479,7 +508,7 @@ static s64 match_chain(struct callchain_cursor_node *node,
* give a part of its callchain to the created child.
* Then create another child to host the given callchain of new branch
*/
-static void
+static int
split_add_child(struct callchain_node *parent,
struct callchain_cursor *cursor,
struct callchain_list *to_split,
@@ -491,6 +520,8 @@ split_add_child(struct callchain_node *parent,
/* split */
new = create_child(parent, true);
+ if (new == NULL)
+ return -1;
/* split the callchain and move a part to the new child */
old_tail = parent->val.prev;
@@ -524,6 +555,8 @@ split_add_child(struct callchain_node *parent,
node = callchain_cursor_current(cursor);
new = add_child(parent, cursor, period);
+ if (new == NULL)
+ return -1;
/*
* This is second child since we moved parent's children
@@ -534,7 +567,7 @@ split_add_child(struct callchain_node *parent,
cnode = list_first_entry(&first->val, struct callchain_list,
list);
- if (match_chain(node, cnode) < 0)
+ if (match_chain(node, cnode) == MATCH_LT)
pp = &p->rb_left;
else
pp = &p->rb_right;
@@ -545,14 +578,15 @@ split_add_child(struct callchain_node *parent,
parent->hit = period;
parent->count = 1;
}
+ return 0;
}
-static int
+static enum match_result
append_chain(struct callchain_node *root,
struct callchain_cursor *cursor,
u64 period);
-static void
+static int
append_chain_children(struct callchain_node *root,
struct callchain_cursor *cursor,
u64 period)
@@ -564,36 +598,42 @@ append_chain_children(struct callchain_node *root,
node = callchain_cursor_current(cursor);
if (!node)
- return;
+ return -1;
/* lookup in childrens */
while (*p) {
- s64 ret;
+ enum match_result ret;
parent = *p;
rnode = rb_entry(parent, struct callchain_node, rb_node_in);
/* If at least first entry matches, rely to children */
ret = append_chain(rnode, cursor, period);
- if (ret == 0)
+ if (ret == MATCH_EQ)
goto inc_children_hit;
+ if (ret == MATCH_ERROR)
+ return -1;
- if (ret < 0)
+ if (ret == MATCH_LT)
p = &parent->rb_left;
else
p = &parent->rb_right;
}
/* nothing in children, add to the current node */
rnode = add_child(root, cursor, period);
+ if (rnode == NULL)
+ return -1;
+
rb_link_node(&rnode->rb_node_in, parent, p);
rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);
inc_children_hit:
root->children_hit += period;
root->children_count++;
+ return 0;
}
-static int
+static enum match_result
append_chain(struct callchain_node *root,
struct callchain_cursor *cursor,
u64 period)
@@ -602,7 +642,7 @@ append_chain(struct callchain_node *root,
u64 start = cursor->pos;
bool found = false;
u64 matches;
- int cmp = 0;
+ enum match_result cmp = MATCH_ERROR;
/*
* Lookup in the current node
@@ -618,7 +658,7 @@ append_chain(struct callchain_node *root,
break;
cmp = match_chain(node, cnode);
- if (cmp)
+ if (cmp != MATCH_EQ)
break;
found = true;
@@ -628,7 +668,7 @@ append_chain(struct callchain_node *root,
/* no match, relay to the parent */
if (!found) {
- WARN_ONCE(!cmp, "Chain comparison error\n");
+ WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n");
return cmp;
}
@@ -636,21 +676,25 @@ append_chain(struct callchain_node *root,
/* we match only a part of the node. Split it and add the new chain */
if (matches < root->val_nr) {
- split_add_child(root, cursor, cnode, start, matches, period);
- return 0;
+ if (split_add_child(root, cursor, cnode, start, matches,
+ period) < 0)
+ return MATCH_ERROR;
+
+ return MATCH_EQ;
}
/* we match 100% of the path, increment the hit */
if (matches == root->val_nr && cursor->pos == cursor->nr) {
root->hit += period;
root->count++;
- return 0;
+ return MATCH_EQ;
}
/* We match the node and still have a part remaining */
- append_chain_children(root, cursor, period);
+ if (append_chain_children(root, cursor, period) < 0)
+ return MATCH_ERROR;
- return 0;
+ return MATCH_EQ;
}
int callchain_append(struct callchain_root *root,
@@ -662,7 +706,8 @@ int callchain_append(struct callchain_root *root,
callchain_cursor_commit(cursor);
- append_chain_children(&root->node, cursor, period);
+ if (append_chain_children(&root->node, cursor, period) < 0)
+ return -1;
if (cursor->nr > root->max_depth)
root->max_depth = cursor->nr;
@@ -690,7 +735,8 @@ merge_chain_branch(struct callchain_cursor *cursor,
if (src->hit) {
callchain_cursor_commit(cursor);
- append_chain_children(dst, cursor, src->hit);
+ if (append_chain_children(dst, cursor, src->hit) < 0)
+ return -1;
}
n = rb_first(&src->rb_root_in);
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index e5fb88bab9e1..43e84aa27e4a 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -32,14 +32,15 @@ int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
return 0;
}
-int perf_color_default_config(const char *var, const char *value, void *cb)
+int perf_color_default_config(const char *var, const char *value,
+ void *cb __maybe_unused)
{
if (!strcmp(var, "color.ui")) {
perf_use_color_default = perf_config_colorbool(var, value, -1);
return 0;
}
- return perf_default_config(var, value, cb);
+ return 0;
}
static int __color_vsnprintf(char *bf, size_t size, const char *color,
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index d3e12e30e1d5..4e727635476e 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -26,7 +26,7 @@ static const char *config_file_name;
static int config_linenr;
static int config_file_eof;
-static const char *config_exclusive_filename;
+const char *config_exclusive_filename;
static int get_next_char(void)
{
@@ -434,7 +434,7 @@ static int perf_config_from_file(config_fn_t fn, const char *filename, void *dat
return ret;
}
-static const char *perf_etc_perfconfig(void)
+const char *perf_etc_perfconfig(void)
{
static const char *system_wide;
if (!system_wide)
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index fa935093a599..9bcf2bed3a6d 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -8,6 +8,10 @@
#include <linux/bitmap.h>
#include "asm/bug.h"
+static int max_cpu_num;
+static int max_node_num;
+static int *cpunode_map;
+
static struct cpu_map *cpu_map__default_new(void)
{
struct cpu_map *cpus;
@@ -486,6 +490,32 @@ out:
pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}
+int cpu__max_node(void)
+{
+ if (unlikely(!max_node_num))
+ set_max_node_num();
+
+ return max_node_num;
+}
+
+int cpu__max_cpu(void)
+{
+ if (unlikely(!max_cpu_num))
+ set_max_cpu_num();
+
+ return max_cpu_num;
+}
+
+int cpu__get_node(int cpu)
+{
+ if (unlikely(cpunode_map == NULL)) {
+ pr_debug("cpu_map not initialized\n");
+ return -1;
+ }
+
+ return cpunode_map[cpu];
+}
+
static int init_cpunode_map(void)
{
int i;
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 71c41b9efabb..81a2562aaa2b 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -57,37 +57,11 @@ static inline bool cpu_map__empty(const struct cpu_map *map)
return map ? map->map[0] == -1 : true;
}
-int max_cpu_num;
-int max_node_num;
-int *cpunode_map;
-
int cpu__setup_cpunode_map(void);
-static inline int cpu__max_node(void)
-{
- if (unlikely(!max_node_num))
- pr_debug("cpu_map not initialized\n");
-
- return max_node_num;
-}
-
-static inline int cpu__max_cpu(void)
-{
- if (unlikely(!max_cpu_num))
- pr_debug("cpu_map not initialized\n");
-
- return max_cpu_num;
-}
-
-static inline int cpu__get_node(int cpu)
-{
- if (unlikely(cpunode_map == NULL)) {
- pr_debug("cpu_map not initialized\n");
- return -1;
- }
-
- return cpunode_map[cpu];
-}
+int cpu__max_node(void);
+int cpu__max_cpu(void);
+int cpu__get_node(int cpu);
int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
int (*f)(struct cpu_map *map, int cpu, void *data),
diff --git a/tools/perf/util/ctype.c b/tools/perf/util/ctype.c
index aada3ac5e891..d4a5a21c2a7e 100644
--- a/tools/perf/util/ctype.c
+++ b/tools/perf/util/ctype.c
@@ -32,8 +32,17 @@ unsigned char sane_ctype[256] = {
const char *graph_line =
"_____________________________________________________________________"
+ "_____________________________________________________________________"
"_____________________________________________________________________";
const char *graph_dotted_line =
"---------------------------------------------------------------------"
"---------------------------------------------------------------------"
"---------------------------------------------------------------------";
+const char *spaces =
+ " "
+ " "
+ " ";
+const char *dots =
+ "....................................................................."
+ "....................................................................."
+ ".....................................................................";
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 34cd1e4039d3..811af89ce0bb 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -352,6 +352,84 @@ static int add_tracepoint_values(struct ctf_writer *cw,
return ret;
}
+static int
+add_bpf_output_values(struct bt_ctf_event_class *event_class,
+ struct bt_ctf_event *event,
+ struct perf_sample *sample)
+{
+ struct bt_ctf_field_type *len_type, *seq_type;
+ struct bt_ctf_field *len_field, *seq_field;
+ unsigned int raw_size = sample->raw_size;
+ unsigned int nr_elements = raw_size / sizeof(u32);
+ unsigned int i;
+ int ret;
+
+ if (nr_elements * sizeof(u32) != raw_size)
+ pr_warning("Incorrect raw_size (%u) in bpf output event, skip %lu bytes\n",
+ raw_size, nr_elements * sizeof(u32) - raw_size);
+
+ len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
+ len_field = bt_ctf_field_create(len_type);
+ if (!len_field) {
+ pr_err("failed to create 'raw_len' for bpf output event\n");
+ ret = -1;
+ goto put_len_type;
+ }
+
+ ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
+ if (ret) {
+ pr_err("failed to set field value for raw_len\n");
+ goto put_len_field;
+ }
+ ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
+ if (ret) {
+ pr_err("failed to set payload to raw_len\n");
+ goto put_len_field;
+ }
+
+ seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
+ seq_field = bt_ctf_field_create(seq_type);
+ if (!seq_field) {
+ pr_err("failed to create 'raw_data' for bpf output event\n");
+ ret = -1;
+ goto put_seq_type;
+ }
+
+ ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
+ if (ret) {
+ pr_err("failed to set length of 'raw_data'\n");
+ goto put_seq_field;
+ }
+
+ for (i = 0; i < nr_elements; i++) {
+ struct bt_ctf_field *elem_field =
+ bt_ctf_field_sequence_get_field(seq_field, i);
+
+ ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
+ ((u32 *)(sample->raw_data))[i]);
+
+ bt_ctf_field_put(elem_field);
+ if (ret) {
+ pr_err("failed to set raw_data[%d]\n", i);
+ goto put_seq_field;
+ }
+ }
+
+ ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
+ if (ret)
+ pr_err("failed to set payload for raw_data\n");
+
+put_seq_field:
+ bt_ctf_field_put(seq_field);
+put_seq_type:
+ bt_ctf_field_type_put(seq_type);
+put_len_field:
+ bt_ctf_field_put(len_field);
+put_len_type:
+ bt_ctf_field_type_put(len_type);
+ return ret;
+}
+
static int add_generic_values(struct ctf_writer *cw,
struct bt_ctf_event *event,
struct perf_evsel *evsel,
@@ -597,6 +675,12 @@ static int process_sample_event(struct perf_tool *tool,
return -1;
}
+ if (perf_evsel__is_bpf_output(evsel)) {
+ ret = add_bpf_output_values(event_class, event, sample);
+ if (ret)
+ return -1;
+ }
+
cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
if (cs) {
if (is_flush_needed(cs))
@@ -744,6 +828,25 @@ static int add_tracepoint_types(struct ctf_writer *cw,
return ret;
}
+static int add_bpf_output_types(struct ctf_writer *cw,
+ struct bt_ctf_event_class *class)
+{
+ struct bt_ctf_field_type *len_type = cw->data.u32;
+ struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
+ struct bt_ctf_field_type *seq_type;
+ int ret;
+
+ ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
+ if (ret)
+ return ret;
+
+ seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
+ if (!seq_type)
+ return -1;
+
+ return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
+}
+
static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
struct bt_ctf_event_class *event_class)
{
@@ -755,7 +858,8 @@ static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
* ctf event header
* PERF_SAMPLE_READ - TODO
* PERF_SAMPLE_CALLCHAIN - TODO
- * PERF_SAMPLE_RAW - tracepoint fields are handled separately
+ * PERF_SAMPLE_RAW - tracepoint fields and BPF output
+ * are handled separately
* PERF_SAMPLE_BRANCH_STACK - TODO
* PERF_SAMPLE_REGS_USER - TODO
* PERF_SAMPLE_STACK_USER - TODO
@@ -824,6 +928,12 @@ static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
goto err;
}
+ if (perf_evsel__is_bpf_output(evsel)) {
+ ret = add_bpf_output_types(cw, event_class);
+ if (ret)
+ goto err;
+ }
+
ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
if (ret) {
pr("Failed to add event class into stream.\n");
@@ -858,6 +968,23 @@ static int setup_events(struct ctf_writer *cw, struct perf_session *session)
return 0;
}
+static void cleanup_events(struct perf_session *session)
+{
+ struct perf_evlist *evlist = session->evlist;
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel) {
+ struct evsel_priv *priv;
+
+ priv = evsel->priv;
+ bt_ctf_event_class_put(priv->event_class);
+ zfree(&evsel->priv);
+ }
+
+ perf_evlist__delete(evlist);
+ session->evlist = NULL;
+}
+
static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
struct ctf_stream **stream;
@@ -953,6 +1080,12 @@ static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
goto err;
+#if __BYTE_ORDER == __BIG_ENDIAN
+ bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
+#else
+ bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
+#endif
+
pr2("Created type: INTEGER %d-bit %ssigned %s\n",
size, sign ? "un" : "", hex ? "hex" : "");
return type;
@@ -1100,7 +1233,7 @@ static int convert__config(const char *var, const char *value, void *cb)
return 0;
}
- return perf_default_config(var, value, cb);
+ return 0;
}
int bt_convert__perf2ctf(const char *input, const char *path, bool force)
@@ -1171,6 +1304,7 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force)
(double) c.events_size / 1024.0 / 1024.0,
c.events_count);
+ cleanup_events(session);
perf_session__delete(session);
ctf_writer__cleanup(cw);
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 86d9c7302598..8c4212abd19b 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -5,6 +5,7 @@
#include <string.h>
#include <stdarg.h>
#include <stdio.h>
+#include <api/debug.h>
#include "cache.h"
#include "color.h"
@@ -22,7 +23,7 @@ int debug_ordered_events;
static int redirect_to_stderr;
int debug_data_convert;
-static int _eprintf(int level, int var, const char *fmt, va_list args)
+int veprintf(int level, int var, const char *fmt, va_list args)
{
int ret = 0;
@@ -36,24 +37,19 @@ static int _eprintf(int level, int var, const char *fmt, va_list args)
return ret;
}
-int veprintf(int level, int var, const char *fmt, va_list args)
-{
- return _eprintf(level, var, fmt, args);
-}
-
int eprintf(int level, int var, const char *fmt, ...)
{
va_list args;
int ret;
va_start(args, fmt);
- ret = _eprintf(level, var, fmt, args);
+ ret = veprintf(level, var, fmt, args);
va_end(args);
return ret;
}
-static int __eprintf_time(u64 t, const char *fmt, va_list args)
+static int veprintf_time(u64 t, const char *fmt, va_list args)
{
int ret = 0;
u64 secs, usecs, nsecs = t;
@@ -75,7 +71,7 @@ int eprintf_time(int level, int var, u64 t, const char *fmt, ...)
if (var >= level) {
va_start(args, fmt);
- ret = __eprintf_time(t, fmt, args);
+ ret = veprintf_time(t, fmt, args);
va_end(args);
}
@@ -91,7 +87,7 @@ void pr_stat(const char *fmt, ...)
va_list args;
va_start(args, fmt);
- _eprintf(1, verbose, fmt, args);
+ veprintf(1, verbose, fmt, args);
va_end(args);
eprintf(1, verbose, "\n");
}
@@ -110,40 +106,61 @@ int dump_printf(const char *fmt, ...)
return ret;
}
+static void trace_event_printer(enum binary_printer_ops op,
+ unsigned int val, void *extra)
+{
+ const char *color = PERF_COLOR_BLUE;
+ union perf_event *event = (union perf_event *)extra;
+ unsigned char ch = (unsigned char)val;
+
+ switch (op) {
+ case BINARY_PRINT_DATA_BEGIN:
+ printf(".");
+ color_fprintf(stdout, color, "\n. ... raw event: size %d bytes\n",
+ event->header.size);
+ break;
+ case BINARY_PRINT_LINE_BEGIN:
+ printf(".");
+ break;
+ case BINARY_PRINT_ADDR:
+ color_fprintf(stdout, color, " %04x: ", val);
+ break;
+ case BINARY_PRINT_NUM_DATA:
+ color_fprintf(stdout, color, " %02x", val);
+ break;
+ case BINARY_PRINT_NUM_PAD:
+ color_fprintf(stdout, color, " ");
+ break;
+ case BINARY_PRINT_SEP:
+ color_fprintf(stdout, color, " ");
+ break;
+ case BINARY_PRINT_CHAR_DATA:
+ color_fprintf(stdout, color, "%c",
+ isprint(ch) ? ch : '.');
+ break;
+ case BINARY_PRINT_CHAR_PAD:
+ color_fprintf(stdout, color, " ");
+ break;
+ case BINARY_PRINT_LINE_END:
+ color_fprintf(stdout, color, "\n");
+ break;
+ case BINARY_PRINT_DATA_END:
+ printf("\n");
+ break;
+ default:
+ break;
+ }
+}
+
void trace_event(union perf_event *event)
{
unsigned char *raw_event = (void *)event;
- const char *color = PERF_COLOR_BLUE;
- int i, j;
if (!dump_trace)
return;
- printf(".");
- color_fprintf(stdout, color, "\n. ... raw event: size %d bytes\n",
- event->header.size);
-
- for (i = 0; i < event->header.size; i++) {
- if ((i & 15) == 0) {
- printf(".");
- color_fprintf(stdout, color, " %04x: ", i);
- }
-
- color_fprintf(stdout, color, " %02x", raw_event[i]);
-
- if (((i & 15) == 15) || i == event->header.size-1) {
- color_fprintf(stdout, color, " ");
- for (j = 0; j < 15-(i & 15); j++)
- color_fprintf(stdout, color, " ");
- for (j = i & ~15; j <= i; j++) {
- color_fprintf(stdout, color, "%c",
- isprint(raw_event[j]) ?
- raw_event[j] : '.');
- }
- color_fprintf(stdout, color, "\n");
- }
- }
- printf(".\n");
+ print_binary(raw_event, event->header.size, 16,
+ trace_event_printer, event);
}
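
print_binary() itself is introduced elsewhere in this series, so its exact signature is an assumption here; reconstructed from the open-coded loop it replaces above, the driver plausibly looks like this (16-byte lines, with the callback told about every layout event):

    typedef void (*print_binary_t)(enum binary_printer_ops op,
                                   unsigned int val, void *extra);

    /* sketch under the assumptions above, not the helper's actual source */
    static void print_binary(unsigned char *data, size_t len,
                             size_t bytes_per_line, print_binary_t printer,
                             void *extra)
    {
            size_t i, j, mask = bytes_per_line - 1; /* power of two assumed */

            printer(BINARY_PRINT_DATA_BEGIN, 0, extra);
            for (i = 0; i < len; i++) {
                    if ((i & mask) == 0) {
                            printer(BINARY_PRINT_LINE_BEGIN, 0, extra);
                            printer(BINARY_PRINT_ADDR, (unsigned int)i, extra);
                    }
                    printer(BINARY_PRINT_NUM_DATA, data[i], extra);
                    if ((i & mask) == mask || i == len - 1) {
                            for (j = 0; j < mask - (i & mask); j++)
                                    printer(BINARY_PRINT_NUM_PAD, 0, extra);
                            printer(BINARY_PRINT_SEP, (unsigned int)i, extra);
                            for (j = i & ~mask; j <= i; j++)
                                    printer(BINARY_PRINT_CHAR_DATA, data[j], extra);
                            for (j = 0; j < mask - (i & mask); j++)
                                    printer(BINARY_PRINT_CHAR_PAD, 0, extra);
                            printer(BINARY_PRINT_LINE_END, 0, extra);
                    }
            }
            printer(BINARY_PRINT_DATA_END, 0, extra);
    }
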
static struct debug_variable {
@@ -192,3 +209,23 @@ int perf_debug_option(const char *str)
free(s);
return 0;
}
+
+#define DEBUG_WRAPPER(__n, __l) \
+static int pr_ ## __n ## _wrapper(const char *fmt, ...) \
+{ \
+ va_list args; \
+ int ret; \
+ \
+ va_start(args, fmt); \
+ ret = veprintf(__l, verbose, fmt, args); \
+ va_end(args); \
+ return ret; \
+}
+
+DEBUG_WRAPPER(warning, 0);
+DEBUG_WRAPPER(debug, 1);
+
+void perf_debug_setup(void)
+{
+ libapi_set_print(pr_warning_wrapper, pr_warning_wrapper, pr_debug_wrapper);
+}
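
For reference, DEBUG_WRAPPER(warning, 0) above expands to:

    static int pr_warning_wrapper(const char *fmt, ...)
    {
            va_list args;
            int ret;

            va_start(args, fmt);
            ret = veprintf(0, verbose, fmt, args);
            va_end(args);
            return ret;
    }
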
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 8b9a088c32ab..14bafda79eda 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -53,5 +53,6 @@ int eprintf_time(int level, int var, u64 t, const char *fmt, ...) __attribute__(
int veprintf(int level, int var, const char *fmt, va_list args);
int perf_debug_option(const char *str);
+void perf_debug_setup(void);
#endif /* __PERF_DEBUG_H */
diff --git a/tools/perf/util/demangle-java.c b/tools/perf/util/demangle-java.c
new file mode 100644
index 000000000000..3e6062ab2cdd
--- /dev/null
+++ b/tools/perf/util/demangle-java.c
@@ -0,0 +1,199 @@
+#include <sys/types.h>
+#include <stdio.h>
+#include <string.h>
+#include "util.h"
+#include "debug.h"
+#include "symbol.h"
+
+#include "demangle-java.h"
+
+enum {
+ MODE_PREFIX = 0,
+ MODE_CLASS = 1,
+ MODE_FUNC = 2,
+ MODE_TYPE = 3,
+ MODE_CTYPE = 3, /* class arg; deliberately aliases MODE_TYPE */
+};
+
+#define BASE_ENT(c, n) [c - 'A']=n
+static const char *base_types['Z' - 'A' + 1] = {
+ BASE_ENT('B', "byte" ),
+ BASE_ENT('C', "char" ),
+ BASE_ENT('D', "double" ),
+ BASE_ENT('F', "float" ),
+ BASE_ENT('I', "int" ),
+ BASE_ENT('J', "long" ),
+ BASE_ENT('S', "short" ),
+ BASE_ENT('Z', "bool" ),
+};
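
The BASE_ENT() designated initializers build a sparse lookup table indexed by the JVM base-type descriptor letter, so the 'B'...'Z' cases below translate in a single array access:

    const char *name = base_types['J' - 'A']; /* "long" */
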
+
+/*
+ * demangle Java symbol between str and end positions and stores
+ * up to maxlen characters into buf. The parser starts in mode.
+ *
+ * Use MODE_PREFIX to process entire prototype till end position
+ * Use MODE_TYPE to process return type if str starts on return type char
+ *
+ * Return:
+ * success: buf
+ * error : NULL
+ */
+static char *
+__demangle_java_sym(const char *str, const char *end, char *buf, int maxlen, int mode)
+{
+ int rlen = 0;
+ int array = 0;
+ int narg = 0;
+ const char *q;
+
+ if (!end)
+ end = str + strlen(str);
+
+ for (q = str; q != end; q++) {
+
+ if (rlen == (maxlen - 1))
+ break;
+
+ switch (*q) {
+ case 'L':
+ if (mode == MODE_PREFIX || mode == MODE_CTYPE) {
+ if (mode == MODE_CTYPE) {
+ if (narg)
+ rlen += scnprintf(buf + rlen, maxlen - rlen, ", ");
+ narg++;
+ }
+ rlen += scnprintf(buf + rlen, maxlen - rlen, "class ");
+ if (mode == MODE_PREFIX)
+ mode = MODE_CLASS;
+ } else
+ buf[rlen++] = *q;
+ break;
+ case 'B':
+ case 'C':
+ case 'D':
+ case 'F':
+ case 'I':
+ case 'J':
+ case 'S':
+ case 'Z':
+ if (mode == MODE_TYPE) {
+ if (narg)
+ rlen += scnprintf(buf + rlen, maxlen - rlen, ", ");
+ rlen += scnprintf(buf + rlen, maxlen - rlen, "%s", base_types[*q - 'A']);
+ while (array--)
+ rlen += scnprintf(buf + rlen, maxlen - rlen, "[]");
+ array = 0;
+ narg++;
+ } else
+ buf[rlen++] = *q;
+ break;
+ case 'V':
+ if (mode == MODE_TYPE) {
+ rlen += scnprintf(buf + rlen, maxlen - rlen, "void");
+ while (array--)
+ rlen += scnprintf(buf + rlen, maxlen - rlen, "[]");
+ array = 0;
+ } else
+ buf[rlen++] = *q;
+ break;
+ case '[':
+ if (mode != MODE_TYPE)
+ goto error;
+ array++;
+ break;
+ case '(':
+ if (mode != MODE_FUNC)
+ goto error;
+ buf[rlen++] = *q;
+ mode = MODE_TYPE;
+ break;
+ case ')':
+ if (mode != MODE_TYPE)
+ goto error;
+ buf[rlen++] = *q;
+ narg = 0;
+ break;
+ case ';':
+ if (mode != MODE_CLASS && mode != MODE_CTYPE)
+ goto error;
+ /* safe because at least one other char to process */
+ if (isalpha(*(q + 1)))
+ rlen += scnprintf(buf + rlen, maxlen - rlen, ".");
+ if (mode == MODE_CLASS)
+ mode = MODE_FUNC;
+ else if (mode == MODE_CTYPE)
+ mode = MODE_TYPE;
+ break;
+ case '/':
+ if (mode != MODE_CLASS && mode != MODE_CTYPE)
+ goto error;
+ rlen += scnprintf(buf + rlen, maxlen - rlen, ".");
+ break;
+ default :
+ buf[rlen++] = *q;
+ }
+ }
+ buf[rlen] = '\0';
+ return buf;
+error:
+ return NULL;
+}
+
+/*
+ * Demangle Java function signature (openJDK, not GCJ)
+ * input:
+ * str: string to parse. String is not modified
+ * flags: combination of JAVA_DEMANGLE_* flags to modify demangling
+ * return:
+ * if input can be demangled, then a newly allocated string is returned.
+ * if input cannot be demangled, then NULL is returned
+ *
+ * Note: caller is responsible for freeing demangled string
+ */
+char *
+java_demangle_sym(const char *str, int flags)
+{
+ char *buf, *ptr;
+ char *p;
+ size_t len, l1 = 0;
+
+ if (!str)
+ return NULL;
+
+ /* find start of return type */
+ p = strrchr(str, ')');
+ if (!p)
+ return NULL;
+
+ /*
+ * expansion factor estimated to 3x
+ */
+ len = strlen(str) * 3 + 1;
+ buf = malloc(len);
+ if (!buf)
+ return NULL;
+
+ buf[0] = '\0';
+ if (!(flags & JAVA_DEMANGLE_NORET)) {
+ /*
+ * get return type first
+ */
+ ptr = __demangle_java_sym(p + 1, NULL, buf, len, MODE_TYPE);
+ if (!ptr)
+ goto error;
+
+ /* add space between return type and function prototype */
+ l1 = strlen(buf);
+ buf[l1++] = ' ';
+ }
+
+ /* process function up to return type */
+ ptr = __demangle_java_sym(str, p + 1, buf + l1, len - l1, MODE_PREFIX);
+ if (!ptr)
+ goto error;
+
+ return buf;
+error:
+ free(buf);
+ return NULL;
+}
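
Tracing the two passes above on a made-up signature (return type first, then the class/function prefix):

    /* illustrative input, hand-traced through the parser above */
    char *s = java_demangle_sym("Ljava/lang/Math;max(II)I", 0);
    /* expected: "int class java.lang.Math.max(int, int)" */
    free(s);
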
diff --git a/tools/perf/util/demangle-java.h b/tools/perf/util/demangle-java.h
new file mode 100644
index 000000000000..a981c1f968fe
--- /dev/null
+++ b/tools/perf/util/demangle-java.h
@@ -0,0 +1,10 @@
+#ifndef __PERF_DEMANGLE_JAVA
+#define __PERF_DEMANGLE_JAVA 1
+/*
+ * demangle function flags
+ */
+#define JAVA_DEMANGLE_NORET 0x1 /* do not process return type */
+
+char * java_demangle_sym(const char *str, int flags);
+
+#endif /* __PERF_DEMANGLE_JAVA */
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index e8e9a9dbf5e3..8e6395439ca0 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -52,6 +52,11 @@ int dso__read_binary_type_filename(const struct dso *dso,
debuglink--;
if (*debuglink == '/')
debuglink++;
+
+ ret = -1;
+ if (!is_regular_file(filename))
+ break;
+
ret = filename__read_debuglink(filename, debuglink,
size - (debuglink - filename));
}
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 7dd5939dea2e..49a11d9d8b8f 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -6,6 +6,8 @@ struct perf_env perf_env;
void perf_env__exit(struct perf_env *env)
{
+ int i;
+
zfree(&env->hostname);
zfree(&env->os_release);
zfree(&env->version);
@@ -19,6 +21,10 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->numa_nodes);
zfree(&env->pmu_mappings);
zfree(&env->cpu);
+
+ for (i = 0; i < env->caches_cnt; i++)
+ cpu_cache_level__free(&env->caches[i]);
+ zfree(&env->caches);
}
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
@@ -75,3 +81,10 @@ int perf_env__read_cpu_topology_map(struct perf_env *env)
env->nr_cpus_avail = nr_cpus;
return 0;
}
+
+void cpu_cache_level__free(struct cpu_cache_level *cache)
+{
+ free(cache->type);
+ free(cache->map);
+ free(cache->size);
+}
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index 0132b9557c02..56cffb60a0b4 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -1,11 +1,23 @@
#ifndef __PERF_ENV_H
#define __PERF_ENV_H
+#include <linux/types.h>
+
struct cpu_topology_map {
int socket_id;
int core_id;
};
+struct cpu_cache_level {
+ u32 level;
+ u32 line_size;
+ u32 sets;
+ u32 ways;
+ char *type;
+ char *size;
+ char *map;
+};
+
struct perf_env {
char *hostname;
char *os_release;
@@ -31,6 +43,8 @@ struct perf_env {
char *numa_nodes;
char *pmu_mappings;
struct cpu_topology_map *cpu;
+ struct cpu_cache_level *caches;
+ int caches_cnt;
};
extern struct perf_env perf_env;
@@ -41,4 +55,5 @@ int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);
int perf_env__read_cpu_topology_map(struct perf_env *env);
+void cpu_cache_level__free(struct cpu_cache_level *cache);
#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 85155e91b61b..7bad5c3fa7b7 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -282,7 +282,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
strcpy(execname, "");
/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
- n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
+ n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
&event->mmap2.start, &event->mmap2.len, prot,
&event->mmap2.pgoff, &event->mmap2.maj,
&event->mmap2.min,
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index d81f13de2476..86a03836a83f 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1181,12 +1181,12 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
*/
if (cpus != evlist->cpus) {
cpu_map__put(evlist->cpus);
- evlist->cpus = cpus;
+ evlist->cpus = cpu_map__get(cpus);
}
if (threads != evlist->threads) {
thread_map__put(evlist->threads);
- evlist->threads = threads;
+ evlist->threads = thread_map__get(threads);
}
perf_evlist__propagate_maps(evlist);
@@ -1223,6 +1223,9 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
int err = 0;
evlist__for_each(evlist, evsel) {
+ if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
+ continue;
+
err = perf_evsel__set_filter(evsel, filter);
if (err)
break;
@@ -1624,7 +1627,7 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
return printed + fprintf(fp, "\n");
}
-int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
+int perf_evlist__strerror_open(struct perf_evlist *evlist,
int err, char *buf, size_t size)
{
int printed, value;
@@ -1652,7 +1655,25 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
"Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
"Hint:\tThe current value is %d.", value);
break;
+ case EINVAL: {
+ struct perf_evsel *first = perf_evlist__first(evlist);
+ int max_freq;
+
+ if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
+ goto out_default;
+
+ if (first->attr.sample_freq < (u64)max_freq)
+ goto out_default;
+
+ printed = scnprintf(buf, size,
+ "Error:\t%s.\n"
+ "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
+ "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
+ emsg, max_freq, first->attr.sample_freq);
+ break;
+ }
default:
+out_default:
scnprintf(buf, size, "%s", emsg);
break;
}
@@ -1723,3 +1744,19 @@ void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
tracking_evsel->tracking = true;
}
+
+struct perf_evsel *
+perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
+ const char *str)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel) {
+ if (!evsel->name)
+ continue;
+ if (strcmp(str, evsel->name) == 0)
+ return evsel;
+ }
+
+ return NULL;
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 7c4d9a206776..a0d15221db6e 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -294,4 +294,7 @@ void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
struct perf_evsel *tracking_evsel);
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);
+
+struct perf_evsel *
+perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index cdbaf9b51e42..0902fe418754 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -225,6 +225,11 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
if (evsel != NULL)
perf_evsel__init(evsel, attr, idx);
+ if (perf_evsel__is_bpf_output(evsel)) {
+ evsel->attr.sample_type |= PERF_SAMPLE_RAW;
+ evsel->attr.sample_period = 1;
+ }
+
return evsel;
}
@@ -898,6 +903,16 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
if (evsel->precise_max)
perf_event_attr__set_max_precise_ip(attr);
+ if (opts->all_user) {
+ attr->exclude_kernel = 1;
+ attr->exclude_user = 0;
+ }
+
+ if (opts->all_kernel) {
+ attr->exclude_kernel = 0;
+ attr->exclude_user = 1;
+ }
+
/*
* Apply event specific term settings,
* it overloads any global configuration.
@@ -2362,12 +2377,15 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
case EPERM:
case EACCES:
return scnprintf(msg, size,
- "You may not have permission to collect %sstats.\n"
- "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
- " -1 - Not paranoid at all\n"
- " 0 - Disallow raw tracepoint access for unpriv\n"
- " 1 - Disallow cpu events for unpriv\n"
- " 2 - Disallow kernel profiling for unpriv",
+ "You may not have permission to collect %sstats.\n\n"
+ "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
+ "which controls use of the performance events system by\n"
+ "unprivileged users (without CAP_SYS_ADMIN).\n\n"
+ "The default value is 1:\n\n"
+ " -1: Allow use of (almost) all events by all users\n"
+ ">= 0: Disallow raw tracepoint access by users without CAP_IOC_LOCK\n"
+ ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
+ ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN",
target->system_wide ? "system-wide " : "");
case ENOENT:
return scnprintf(msg, size, "The %s event is not supported.",
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 8e75434bd01c..501ea6e565f1 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -93,10 +93,8 @@ struct perf_evsel {
const char *unit;
struct event_format *tp_format;
off_t id_offset;
- union {
- void *priv;
- u64 db_id;
- };
+ void *priv;
+ u64 db_id;
struct cgroup_sel *cgrp;
void *handler;
struct cpu_map *cpus;
@@ -364,6 +362,14 @@ static inline bool perf_evsel__is_function_event(struct perf_evsel *evsel)
#undef FUNCTION_EVENT
}
+static inline bool perf_evsel__is_bpf_output(struct perf_evsel *evsel)
+{
+ struct perf_event_attr *attr = &evsel->attr;
+
+ return (attr->config == PERF_COUNT_SW_BPF_OUTPUT) &&
+ (attr->type == PERF_TYPE_SOFTWARE);
+}
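
For reference, the attr shape this predicate matches; how such an evsel gets created (e.g. from a 'bpf-output' event string) is outside this hunk:

    struct perf_event_attr attr = {
            .type   = PERF_TYPE_SOFTWARE,
            .config = PERF_COUNT_SW_BPF_OUTPUT,
    };
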
+
struct perf_attr_details {
bool freq;
bool verbose;
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
new file mode 100644
index 000000000000..c1ef805c6a8f
--- /dev/null
+++ b/tools/perf/util/genelf.c
@@ -0,0 +1,449 @@
+/*
+ * genelf.c
+ * Copyright (C) 2014, Google, Inc
+ *
+ * Contributed by:
+ * Stephane Eranian <eranian@gmail.com>
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include <sys/types.h>
+#include <stdio.h>
+#include <getopt.h>
+#include <stddef.h>
+#include <libelf.h>
+#include <string.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <err.h>
+#include <dwarf.h>
+
+#include "perf.h"
+#include "genelf.h"
+#include "../util/jitdump.h"
+
+#define JVMTI
+
+#define BUILD_ID_URANDOM /* different uuid for each run */
+
+#ifdef HAVE_LIBCRYPTO
+
+#define BUILD_ID_MD5
+#undef BUILD_ID_SHA /* does not seem to work well when linked with Java */
+#undef BUILD_ID_URANDOM /* different uuid for each run */
+
+#ifdef BUILD_ID_SHA
+#include <openssl/sha.h>
+#endif
+
+#ifdef BUILD_ID_MD5
+#include <openssl/md5.h>
+#endif
+#endif
+
+
+typedef struct {
+ unsigned int namesz; /* Size of entry's owner string */
+ unsigned int descsz; /* Size of the note descriptor */
+ unsigned int type; /* Interpretation of the descriptor */
+ char name[0]; /* Start of the name+desc data */
+} Elf_Note;
+
+struct options {
+ char *output;
+ int fd;
+};
+
+static char shd_string_table[] = {
+ 0,
+ '.', 't', 'e', 'x', 't', 0, /* 1 */
+ '.', 's', 'h', 's', 't', 'r', 't', 'a', 'b', 0, /* 7 */
+ '.', 's', 'y', 'm', 't', 'a', 'b', 0, /* 17 */
+ '.', 's', 't', 'r', 't', 'a', 'b', 0, /* 25 */
+ '.', 'n', 'o', 't', 'e', '.', 'g', 'n', 'u', '.', 'b', 'u', 'i', 'l', 'd', '-', 'i', 'd', 0, /* 33 */
+ '.', 'd', 'e', 'b', 'u', 'g', '_', 'l', 'i', 'n', 'e', 0, /* 52 */
+ '.', 'd', 'e', 'b', 'u', 'g', '_', 'i', 'n', 'f', 'o', 0, /* 64 */
+ '.', 'd', 'e', 'b', 'u', 'g', '_', 'a', 'b', 'b', 'r', 'e', 'v', 0, /* 76 */
+};
+
+static struct buildid_note {
+ Elf_Note desc; /* descsz: size of build-id, must be multiple of 4 */
+ char name[4]; /* GNU\0 */
+ char build_id[20];
+} bnote;
+
+static Elf_Sym symtab[]={
+ /* symbol 0 MUST be the undefined symbol */
+ { .st_name = 0, /* index in sym_string table */
+ .st_info = ELF_ST_TYPE(STT_NOTYPE),
+ .st_shndx = 0, /* for now */
+ .st_value = 0x0,
+ .st_other = ELF_ST_VIS(STV_DEFAULT),
+ .st_size = 0,
+ },
+ { .st_name = 1, /* index in sym_string table */
+ .st_info = ELF_ST_BIND(STB_LOCAL) | ELF_ST_TYPE(STT_FUNC),
+ .st_shndx = 1,
+ .st_value = 0, /* for now */
+ .st_other = ELF_ST_VIS(STV_DEFAULT),
+ .st_size = 0, /* for now */
+ }
+};
+
+#ifdef BUILD_ID_URANDOM
+static void
+gen_build_id(struct buildid_note *note,
+ unsigned long load_addr __maybe_unused,
+ const void *code __maybe_unused,
+ size_t csize __maybe_unused)
+{
+ int fd;
+ size_t sz = sizeof(note->build_id);
+ ssize_t sret;
+
+ fd = open("/dev/urandom", O_RDONLY);
+ if (fd == -1)
+ err(1, "cannot access /dev/urandom for builid");
+
+ sret = read(fd, note->build_id, sz);
+
+ close(fd);
+
+ if (sret != (ssize_t)sz)
+ memset(note->build_id, 0, sz);
+}
+#endif
+
+#ifdef BUILD_ID_SHA
+static void
+gen_build_id(struct buildid_note *note,
+ unsigned long load_addr __maybe_unused,
+ const void *code,
+ size_t csize)
+{
+ if (sizeof(note->build_id) < SHA_DIGEST_LENGTH)
+ errx(1, "build_id too small for SHA1");
+
+ SHA1(code, csize, (unsigned char *)note->build_id);
+}
+#endif
+
+#ifdef BUILD_ID_MD5
+static void
+gen_build_id(struct buildid_note *note, unsigned long load_addr, const void *code, size_t csize)
+{
+ MD5_CTX context;
+
+ if (sizeof(note->build_id) < 16)
+ errx(1, "build_id too small for MD5");
+
+ MD5_Init(&context);
+ MD5_Update(&context, &load_addr, sizeof(load_addr));
+ MD5_Update(&context, code, csize);
+ MD5_Final((unsigned char *)note->build_id, &context);
+}
+#endif
+
+/*
+ * fd: file descriptor open for writing for the output file
+ * load_addr: code load address (could be zero, just used for buildid)
+ * sym: function name (for native code - used as the symbol)
+ * code: the native code
+ * csize: the code size in bytes
+ */
+int
+jit_write_elf(int fd, uint64_t load_addr, const char *sym,
+ const void *code, int csize,
+ void *debug, int nr_debug_entries)
+{
+ Elf *e;
+ Elf_Data *d;
+ Elf_Scn *scn;
+ Elf_Ehdr *ehdr;
+ Elf_Shdr *shdr;
+ char *strsym = NULL;
+ int symlen;
+ int retval = -1;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+ warnx("ELF initialization failed");
+ return -1;
+ }
+
+ e = elf_begin(fd, ELF_C_WRITE, NULL);
+ if (!e) {
+ warnx("elf_begin failed");
+ goto error;
+ }
+
+ /*
+ * setup ELF header
+ */
+ ehdr = elf_newehdr(e);
+ if (!ehdr) {
+ warnx("cannot get ehdr");
+ goto error;
+ }
+
+ ehdr->e_ident[EI_DATA] = GEN_ELF_ENDIAN;
+ ehdr->e_ident[EI_CLASS] = GEN_ELF_CLASS;
+ ehdr->e_machine = GEN_ELF_ARCH;
+ ehdr->e_type = ET_DYN;
+ ehdr->e_entry = GEN_ELF_TEXT_OFFSET;
+ ehdr->e_version = EV_CURRENT;
+ ehdr->e_shstrndx= 2; /* shdr index for section name */
+
+ /*
+ * setup text section
+ */
+ scn = elf_newscn(e);
+ if (!scn) {
+ warnx("cannot create section");
+ goto error;
+ }
+
+ d = elf_newdata(scn);
+ if (!d) {
+ warnx("cannot get new data");
+ goto error;
+ }
+
+ d->d_align = 16;
+ d->d_off = 0LL;
+ d->d_buf = (void *)code;
+ d->d_type = ELF_T_BYTE;
+ d->d_size = csize;
+ d->d_version = EV_CURRENT;
+
+ shdr = elf_getshdr(scn);
+ if (!shdr) {
+ warnx("cannot get section header");
+ goto error;
+ }
+
+ shdr->sh_name = 1;
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_addr = GEN_ELF_TEXT_OFFSET;
+ shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ shdr->sh_entsize = 0;
+
+ /*
+ * setup section headers string table
+ */
+ scn = elf_newscn(e);
+ if (!scn) {
+ warnx("cannot create section");
+ goto error;
+ }
+
+ d = elf_newdata(scn);
+ if (!d) {
+ warnx("cannot get new data");
+ goto error;
+ }
+
+ d->d_align = 1;
+ d->d_off = 0LL;
+ d->d_buf = shd_string_table;
+ d->d_type = ELF_T_BYTE;
+ d->d_size = sizeof(shd_string_table);
+ d->d_version = EV_CURRENT;
+
+ shdr = elf_getshdr(scn);
+ if (!shdr) {
+ warnx("cannot get section header");
+ goto error;
+ }
+
+ shdr->sh_name = 7; /* offset of '.shstrtab' in shd_string_table */
+ shdr->sh_type = SHT_STRTAB;
+ shdr->sh_flags = 0;
+ shdr->sh_entsize = 0;
+
+ /*
+ * setup symtab section
+ */
+ symtab[1].st_size = csize;
+ symtab[1].st_value = GEN_ELF_TEXT_OFFSET;
+
+ scn = elf_newscn(e);
+ if (!scn) {
+ warnx("cannot create section");
+ goto error;
+ }
+
+ d = elf_newdata(scn);
+ if (!d) {
+ warnx("cannot get new data");
+ goto error;
+ }
+
+ d->d_align = 8;
+ d->d_off = 0LL;
+ d->d_buf = symtab;
+ d->d_type = ELF_T_SYM;
+ d->d_size = sizeof(symtab);
+ d->d_version = EV_CURRENT;
+
+ shdr = elf_getshdr(scn);
+ if (!shdr) {
+ warnx("cannot get section header");
+ goto error;
+ }
+
+ shdr->sh_name = 17; /* offset of '.symtab' in shd_string_table */
+ shdr->sh_type = SHT_SYMTAB;
+ shdr->sh_flags = 0;
+ shdr->sh_entsize = sizeof(Elf_Sym);
+ shdr->sh_link = 4; /* index of .strtab section */
+
+ /*
+ * setup symbols string table
+ * 2 = one leading NUL for the undefined (first) symbol, one trailing NUL after the name
+ */
+ symlen = 2 + strlen(sym);
+ strsym = calloc(1, symlen);
+ if (!strsym) {
+ warnx("cannot allocate strsym");
+ goto error;
+ }
+ strcpy(strsym + 1, sym);
+
+ scn = elf_newscn(e);
+ if (!scn) {
+ warnx("cannot create section");
+ goto error;
+ }
+
+ d = elf_newdata(scn);
+ if (!d) {
+ warnx("cannot get new data");
+ goto error;
+ }
+
+ d->d_align = 1;
+ d->d_off = 0LL;
+ d->d_buf = strsym;
+ d->d_type = ELF_T_BYTE;
+ d->d_size = symlen;
+ d->d_version = EV_CURRENT;
+
+ shdr = elf_getshdr(scn);
+ if (!shdr) {
+ warnx("cannot get section header");
+ goto error;
+ }
+
+ shdr->sh_name = 25; /* offset of '.strtab' in shd_string_table */
+ shdr->sh_type = SHT_STRTAB;
+ shdr->sh_flags = 0;
+ shdr->sh_entsize = 0;
+
+ /*
+ * setup build-id section
+ */
+ scn = elf_newscn(e);
+ if (!scn) {
+ warnx("cannot create section");
+ goto error;
+ }
+
+ d = elf_newdata(scn);
+ if (!d) {
+ warnx("cannot get new data");
+ goto error;
+ }
+
+ /*
+ * build-id generation
+ */
+ gen_build_id(&bnote, load_addr, code, csize);
+ bnote.desc.namesz = sizeof(bnote.name); /* must include 0 termination */
+ bnote.desc.descsz = sizeof(bnote.build_id);
+ bnote.desc.type = NT_GNU_BUILD_ID;
+ strcpy(bnote.name, "GNU");
+
+ d->d_align = 4;
+ d->d_off = 0LL;
+ d->d_buf = &bnote;
+ d->d_type = ELF_T_BYTE;
+ d->d_size = sizeof(bnote);
+ d->d_version = EV_CURRENT;
+
+ shdr = elf_getshdr(scn);
+ if (!shdr) {
+ warnx("cannot get section header");
+ goto error;
+ }
+
+ shdr->sh_name = 33; /* offset of '.note.gnu.build-id' in shd_string_table */
+ shdr->sh_type = SHT_NOTE;
+ shdr->sh_addr = 0x0;
+ shdr->sh_flags = SHF_ALLOC;
+ shdr->sh_size = sizeof(bnote);
+ shdr->sh_entsize = 0;
+
+ if (debug && nr_debug_entries) {
+ retval = jit_add_debug_info(e, load_addr, debug, nr_debug_entries);
+ if (retval)
+ goto error;
+ } else {
+ if (elf_update(e, ELF_C_WRITE) < 0) {
+ warnx("elf_update 4 failed");
+ goto error;
+ }
+ }
+
+ retval = 0;
+error:
+ (void)elf_end(e);
+
+ free(strsym);
+
+ return retval;
+}
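For reference, the sh_name offsets hard-coded above (1, 7, 17, 25, 33) and in jit_add_debug_info further down (52, 64, 76) imply that shd_string_table, defined near the top of this file, is laid out as follows (derived from those offsets):

offset  section name
     0  (empty string)
     1  .text
     7  .shstrtab
    17  .symtab
    25  .strtab
    33  .note.gnu.build-id
    52  .debug_line
    64  .debug_info
    76  .debug_abbrev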
+
+#ifndef JVMTI
+
+static unsigned char x86_code[] = {
+ 0xBB, 0x2A, 0x00, 0x00, 0x00, /* movl $42, %ebx */
+ 0xB8, 0x01, 0x00, 0x00, 0x00, /* movl $1, %eax */
+ 0xCD, 0x80 /* int $0x80 */
+};
+
+static struct options options;
+
+int main(int argc, char **argv)
+{
+ int c, fd, ret;
+
+ while ((c = getopt(argc, argv, "o:h")) != -1) {
+ switch (c) {
+ case 'o':
+ options.output = optarg;
+ break;
+ case 'h':
+ printf("Usage: genelf -o output_file [-h]\n");
+ return 0;
+ default:
+ errx(1, "unknown option");
+ }
+ }
+
+ fd = open(options.output, O_CREAT|O_TRUNC|O_RDWR, 0666);
+ if (fd == -1)
+ err(1, "cannot create file %s", options.output);
+
+ ret = jit_write_elf(fd, 0, "main", x86_code, sizeof(x86_code), NULL, 0);
+ close(fd);
+
+ if (ret != 0)
+ unlink(options.output);
+
+ return ret;
+}
+#endif
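When JVMTI is not defined, the harness above makes genelf a standalone tool; a rough usage sketch (the build command itself is not part of this patch):

$ ./genelf -o /tmp/jitted.so
$ readelf -S /tmp/jitted.so    # expect .text, .shstrtab, .symtab, .strtab and the build-id note
$ nm /tmp/jitted.so            # a single symbol, "main", covering the code bytes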
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
new file mode 100644
index 000000000000..45bf9c6d3257
--- /dev/null
+++ b/tools/perf/util/genelf.h
@@ -0,0 +1,67 @@
+#ifndef __GENELF_H__
+#define __GENELF_H__
+
+/* genelf.c */
+extern int jit_write_elf(int fd, uint64_t code_addr, const char *sym,
+ const void *code, int csize,
+ void *debug, int nr_debug_entries);
+/* genelf_debug.c */
+extern int jit_add_debug_info(Elf *e, uint64_t code_addr,
+ void *debug, int nr_debug_entries);
+
+#if defined(__arm__)
+#define GEN_ELF_ARCH EM_ARM
+#define GEN_ELF_ENDIAN ELFDATA2LSB
+#define GEN_ELF_CLASS ELFCLASS32
+#elif defined(__aarch64__)
+#define GEN_ELF_ARCH EM_AARCH64
+#define GEN_ELF_ENDIAN ELFDATA2LSB
+#define GEN_ELF_CLASS ELFCLASS64
+#elif defined(__x86_64__)
+#define GEN_ELF_ARCH EM_X86_64
+#define GEN_ELF_ENDIAN ELFDATA2LSB
+#define GEN_ELF_CLASS ELFCLASS64
+#elif defined(__i386__)
+#define GEN_ELF_ARCH EM_386
+#define GEN_ELF_ENDIAN ELFDATA2LSB
+#define GEN_ELF_CLASS ELFCLASS32
+#elif defined(__ppcle__)
+#define GEN_ELF_ARCH EM_PPC
+#define GEN_ELF_ENDIAN ELFDATA2LSB
+#define GEN_ELF_CLASS ELFCLASS64
+#elif defined(__powerpc__)
+#define GEN_ELF_ARCH EM_PPC64
+#define GEN_ELF_ENDIAN ELFDATA2MSB
+#define GEN_ELF_CLASS ELFCLASS64
+#elif defined(__powerpcle__)
+#define GEN_ELF_ARCH EM_PPC64
+#define GEN_ELF_ENDIAN ELFDATA2LSB
+#define GEN_ELF_CLASS ELFCLASS64
+#else
+#error "unsupported architecture"
+#endif
+
+#if GEN_ELF_CLASS == ELFCLASS64
+#define elf_newehdr elf64_newehdr
+#define elf_getshdr elf64_getshdr
+#define Elf_Ehdr Elf64_Ehdr
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Sym Elf64_Sym
+#define ELF_ST_TYPE(a) ELF64_ST_TYPE(a)
+#define ELF_ST_BIND(a) ELF64_ST_BIND(a)
+#define ELF_ST_VIS(a) ELF64_ST_VISIBILITY(a)
+#else
+#define elf_newehdr elf32_newehdr
+#define elf_getshdr elf32_getshdr
+#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define ELF_ST_TYPE(a) ELF32_ST_TYPE(a)
+#define ELF_ST_BIND(a) ELF32_ST_BIND(a)
+#define ELF_ST_VIS(a) ELF32_ST_VISIBILITY(a)
+#endif
+
+/* The .text section is directly after the ELF header */
+#define GEN_ELF_TEXT_OFFSET sizeof(Elf_Ehdr)
+
+#endif
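A quick, hypothetical way to sanity-check which class these macros select on the build machine (standalone sketch; assumes libelf headers are installed):

#include <stdio.h>
#include <stdint.h>
#include <libelf.h>
#include "genelf.h"

int main(void)
{
	/* x86_64/aarch64 report the Elf64 sizes; i386/arm the Elf32 ones */
	printf("Elf_Ehdr=%zu Elf_Shdr=%zu Elf_Sym=%zu .text offset=%zu\n",
	       sizeof(Elf_Ehdr), sizeof(Elf_Shdr), sizeof(Elf_Sym),
	       (size_t)GEN_ELF_TEXT_OFFSET);
	return 0;
}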
diff --git a/tools/perf/util/genelf_debug.c b/tools/perf/util/genelf_debug.c
new file mode 100644
index 000000000000..5980f7d256b1
--- /dev/null
+++ b/tools/perf/util/genelf_debug.c
@@ -0,0 +1,610 @@
+/*
+ * genelf_debug.c
+ * Copyright (C) 2015, Google, Inc
+ *
+ * Contributed by:
+ * Stephane Eranian <eranian@google.com>
+ *
+ * Released under the GPL v2.
+ *
+ * based on GPLv2 source code from Oprofile
+ * @remark Copyright 2007 OProfile authors
+ * @author Philippe Elie
+ */
+#include <sys/types.h>
+#include <stdio.h>
+#include <getopt.h>
+#include <stddef.h>
+#include <libelf.h>
+#include <string.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <err.h>
+#include <dwarf.h>
+
+#include "perf.h"
+#include "genelf.h"
+#include "../util/jitdump.h"
+
+#define BUFFER_EXT_DFL_SIZE (4 * 1024)
+
+typedef uint32_t uword;
+typedef uint16_t uhalf;
+typedef int32_t sword;
+typedef int16_t shalf;
+typedef uint8_t ubyte;
+typedef int8_t sbyte;
+
+struct buffer_ext {
+ size_t cur_pos;
+ size_t max_sz;
+ void *data;
+};
+
+static void
+buffer_ext_dump(struct buffer_ext *be, const char *msg)
+{
+ size_t i;
+ warnx("DUMP for %s", msg);
+ for (i = 0 ; i < be->cur_pos; i++)
+ warnx("%4zu 0x%02x", i, (((char *)be->data)[i]) & 0xff);
+}
+
+static inline int
+buffer_ext_add(struct buffer_ext *be, void *addr, size_t sz)
+{
+ void *tmp;
+ size_t be_sz = be->max_sz;
+
+retry:
+ if ((be->cur_pos + sz) < be_sz) {
+ memcpy(be->data + be->cur_pos, addr, sz);
+ be->cur_pos += sz;
+ return 0;
+ }
+
+ if (!be_sz)
+ be_sz = BUFFER_EXT_DFL_SIZE;
+ else
+ be_sz <<= 1;
+
+ tmp = realloc(be->data, be_sz);
+ if (!tmp)
+ return -1;
+
+ be->data = tmp;
+ be->max_sz = be_sz;
+
+ goto retry;
+}
+
+static void
+buffer_ext_init(struct buffer_ext *be)
+{
+ be->data = NULL;
+ be->cur_pos = 0;
+ be->max_sz = 0;
+}
+
+static inline size_t
+buffer_ext_size(struct buffer_ext *be)
+{
+ return be->cur_pos;
+}
+
+static inline void *
+buffer_ext_addr(struct buffer_ext *be)
+{
+ return be->data;
+}
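Taken together, buffer_ext is a geometrically growing byte buffer. A minimal lifecycle sketch using the helpers above (illustrative only, written as if it lived in this file):

static void buffer_ext_example(void)
{
	struct buffer_ext be;
	char out[4];

	buffer_ext_init(&be);                   /* data=NULL, cur_pos=0, max_sz=0 */
	buffer_ext_add(&be, (void *)"abc", 4);  /* first add reallocs to 4 KiB */
	/* later adds double max_sz (8 KiB, 16 KiB, ...) once space runs out */
	memcpy(out, buffer_ext_addr(&be), buffer_ext_size(&be));
	free(be.data);                          /* no destructor is provided */
}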
+
+struct debug_line_header {
+ // Not counting this field
+ uword total_length;
+ // version number (2 currently)
+ uhalf version;
+ // relative offset from next field to
+ // program statement
+ uword prolog_length;
+ ubyte minimum_instruction_length;
+ ubyte default_is_stmt;
+ // line_base - see DWARF 2 specs
+ sbyte line_base;
+ // line_range - see DWARF 2 specs
+ ubyte line_range;
+ // number of opcodes + 1
+ ubyte opcode_base;
+ /* followed by the array of opcode argument counts: ubyte[opcode_base - 1] */
+ /* followed by the include directories: zero-terminated strings, the
+ * list itself terminated by an empty string.
+ */
+ /* followed by an array of { filename, LEB128, LEB128, LEB128 }: the
+ * directory index (0 means current directory), then mtime and file
+ * size; the last entry is followed by an empty string.
+ */
+ /* followed by the first line number program statement */
+} __attribute__((packed));
+
+/* The DWARF 2 spec describes only one compilation unit header, but
+ * binutils handles two flavours of DWARF 2, 32-bit and 64-bit. This is
+ * unrelated to the target architecture: a 32-bit ELF can hold more than
+ * 4 GB of debug information. For now we only emit 32-bit DWARF 2
+ * compilation units; that only becomes a problem if we generate more
+ * than 4 GB of debug information.
+ */
+struct compilation_unit_header {
+ uword total_length;
+ uhalf version;
+ uword debug_abbrev_offset;
+ ubyte pointer_size;
+} __attribute__((packed));
+
+#define DW_LNS_num_opcode (DW_LNS_set_isa + 1)
+
+/* fields filled in at run time are marked with -1 */
+static struct debug_line_header const default_debug_line_header = {
+ .total_length = -1,
+ .version = 2,
+ .prolog_length = -1,
+ .minimum_instruction_length = 1, /* could be better when min instruction size != 1 */
+ .default_is_stmt = 1, /* we don't take care about basic block */
+ .line_base = -5, /* sensible value for line base ... */
+ .line_range = -14, /* ... and line range are guessed statically */
+ .opcode_base = DW_LNS_num_opcode
+};
+
+static ubyte standard_opcode_length[] =
+{
+ 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1
+};
+#if 0
+{
+ [DW_LNS_advance_pc] = 1,
+ [DW_LNS_advance_line] = 1,
+ [DW_LNS_set_file] = 1,
+ [DW_LNS_set_column] = 1,
+ [DW_LNS_fixed_advance_pc] = 1,
+ [DW_LNS_set_isa] = 1,
+};
+#endif
+
+/* fields filled in at run time are marked with -1 */
+static struct compilation_unit_header default_comp_unit_header = {
+ .total_length = -1,
+ .version = 2,
+ .debug_abbrev_offset = 0, /* we reuse the same abbrev entries for all comp unit */
+ .pointer_size = sizeof(void *)
+};
+
+static void emit_uword(struct buffer_ext *be, uword data)
+{
+ buffer_ext_add(be, &data, sizeof(uword));
+}
+
+static void emit_string(struct buffer_ext *be, const char *s)
+{
+ buffer_ext_add(be, (void *)s, strlen(s) + 1);
+}
+
+static void emit_unsigned_LEB128(struct buffer_ext *be,
+ unsigned long data)
+{
+ do {
+ ubyte cur = data & 0x7F;
+ data >>= 7;
+ if (data)
+ cur |= 0x80;
+ buffer_ext_add(be, &cur, 1);
+ } while (data);
+}
+
+static void emit_signed_LEB128(struct buffer_ext *be, long data)
+{
+ int more = 1;
+ int negative = data < 0;
+ int size = sizeof(long) * CHAR_BIT;
+ while (more) {
+ ubyte cur = data & 0x7F;
+ data >>= 7;
+ if (negative)
+ data |= -(1L << (size - 7));
+ if ((data == 0 && !(cur & 0x40)) ||
+ (data == -1l && (cur & 0x40)))
+ more = 0;
+ else
+ cur |= 0x80;
+ buffer_ext_add(be, &cur, 1);
+ }
+}
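Both encoders emit seven payload bits per byte, setting the high bit on every byte except the last. A self-contained sketch (hypothetical helper, independent of the statics above) checking the classic DWARF example encoding:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* encode v as unsigned LEB128 into buf, return the number of bytes used */
static size_t uleb128(unsigned long v, uint8_t *buf)
{
	size_t n = 0;
	do {
		uint8_t b = v & 0x7f;
		v >>= 7;
		if (v)
			b |= 0x80;	/* continuation bit: more bytes follow */
		buf[n++] = b;
	} while (v);
	return n;
}

int main(void)
{
	uint8_t buf[10];
	/* DWARF spec example: 624485 encodes as 0xe5 0x8e 0x26 */
	size_t n = uleb128(624485, buf);
	assert(n == 3 && buf[0] == 0xe5 && buf[1] == 0x8e && buf[2] == 0x26);
	return 0;
}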
+
+static void emit_extended_opcode(struct buffer_ext *be, ubyte opcode,
+ void *data, size_t data_len)
+{
+ buffer_ext_add(be, (char *)"", 1);
+
+ emit_unsigned_LEB128(be, data_len + 1);
+
+ buffer_ext_add(be, &opcode, 1);
+ buffer_ext_add(be, data, data_len);
+}
+
+static void emit_opcode(struct buffer_ext *be, ubyte opcode)
+{
+ buffer_ext_add(be, &opcode, 1);
+}
+
+static void emit_opcode_signed(struct buffer_ext *be,
+ ubyte opcode, long data)
+{
+ buffer_ext_add(be, &opcode, 1);
+ emit_signed_LEB128(be, data);
+}
+
+static void emit_opcode_unsigned(struct buffer_ext *be, ubyte opcode,
+ unsigned long data)
+{
+ buffer_ext_add(be, &opcode, 1);
+ emit_unsigned_LEB128(be, data);
+}
+
+static void emit_advance_pc(struct buffer_ext *be, unsigned long delta_pc)
+{
+ emit_opcode_unsigned(be, DW_LNS_advance_pc, delta_pc);
+}
+
+static void emit_advance_lineno(struct buffer_ext *be, long delta_lineno)
+{
+ emit_opcode_signed(be, DW_LNS_advance_line, delta_lineno);
+}
+
+static void emit_lne_end_of_sequence(struct buffer_ext *be)
+{
+ emit_extended_opcode(be, DW_LNE_end_sequence, NULL, 0);
+}
+
+static void emit_set_file(struct buffer_ext *be, unsigned long idx)
+{
+ emit_opcode_unsigned(be, DW_LNS_set_file, idx);
+}
+
+static void emit_lne_define_filename(struct buffer_ext *be,
+ const char *filename)
+{
+ buffer_ext_add(be, (void *)"", 1);
+
+ /* LNE length: 1 sub-opcode byte, strlen(filename) + 1 for the name, plus 3 LEB128 bytes for directory index, mtime and file size */
+ emit_unsigned_LEB128(be, strlen(filename) + 5);
+ emit_opcode(be, DW_LNE_define_file);
+ emit_string(be, filename);
+ /* directory index 0=do not know */
+ emit_unsigned_LEB128(be, 0);
+ /* last modification date on file 0=do not know */
+ emit_unsigned_LEB128(be, 0);
+ /* filesize 0=do not know */
+ emit_unsigned_LEB128(be, 0);
+}
+
+static void emit_lne_set_address(struct buffer_ext *be,
+ void *address)
+{
+ emit_extended_opcode(be, DW_LNE_set_address, &address, sizeof(unsigned long));
+}
+
+static ubyte get_special_opcode(struct debug_entry *ent,
+ unsigned int last_line,
+ unsigned long last_vma)
+{
+ unsigned int temp;
+ unsigned long delta_addr;
+
+ /*
+ * delta from line_base
+ */
+ temp = (ent->lineno - last_line) - default_debug_line_header.line_base;
+
+ if (temp >= default_debug_line_header.line_range)
+ return 0;
+
+ /*
+ * delta of addresses
+ */
+ delta_addr = (ent->addr - last_vma) / default_debug_line_header.minimum_instruction_length;
+
+ /* This check alone is not sufficient to guarantee the opcode lands in
+ * [0-255], but it is sufficient to guarantee that adding the line
+ * delta below cannot overflow the unsigned long opcode */
+
+ if (delta_addr <= 256 / default_debug_line_header.line_range) {
+ unsigned long opcode = temp +
+ (delta_addr * default_debug_line_header.line_range) +
+ default_debug_line_header.opcode_base;
+
+ return opcode <= 255 ? opcode : 0;
+ }
+ return 0;
+}
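The special-opcode arithmetic is easier to see with concrete numbers. A standalone sketch using common DWARF defaults (opcode_base 13, line_base -5, line_range 14 -- chosen for illustration, not necessarily the exact values this file configures):

#include <assert.h>

enum { OPCODE_BASE = 13, LINE_BASE = -5, LINE_RANGE = 14 };

/* one-byte special opcode for a (line delta, address delta) pair, 0 if it doesn't fit */
static int special_opcode(int line_delta, int addr_delta)
{
	int op = (line_delta - LINE_BASE) + LINE_RANGE * addr_delta + OPCODE_BASE;

	return (op >= OPCODE_BASE && op <= 255) ? op : 0;
}

int main(void)
{
	/* +1 line, +2 address units: (1 - (-5)) + 14 * 2 + 13 = 47 */
	assert(special_opcode(1, 2) == 47);
	/* too large an address delta cannot be encoded in a single byte */
	assert(special_opcode(0, 100) == 0);
	return 0;
}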
+
+static void emit_lineno_info(struct buffer_ext *be,
+ struct debug_entry *ent, size_t nr_entry,
+ unsigned long code_addr)
+{
+ size_t i;
+
+ /*
+ * Machine state at start of a statement program
+ * address = 0
+ * file = 1
+ * line = 1
+ * column = 0
+ * is_stmt = default_is_stmt as given in the debug_line_header
+ * basic block = 0
+ * end sequence = 0
+ */
+
+ /* start state of the state machine we take care of */
+ unsigned long last_vma = code_addr;
+ char const *cur_filename = NULL;
+ unsigned long cur_file_idx = 0;
+ int last_line = 1;
+
+ emit_lne_set_address(be, (void *)code_addr);
+
+ for (i = 0; i < nr_entry; i++, ent = debug_entry_next(ent)) {
+ int need_copy = 0;
+ ubyte special_opcode;
+
+ /*
+ * check if filename changed, if so add it
+ */
+ if (!cur_filename || strcmp(cur_filename, ent->name)) {
+ emit_lne_define_filename(be, ent->name);
+ cur_filename = ent->name;
+ emit_set_file(be, ++cur_file_idx);
+ need_copy = 1;
+ }
+
+ special_opcode = get_special_opcode(ent, last_line, last_vma);
+ if (special_opcode != 0) {
+ last_line = ent->lineno;
+ last_vma = ent->addr;
+ emit_opcode(be, special_opcode);
+ } else {
+ /*
+ * lines differ, emit line delta
+ */
+ if (last_line != ent->lineno) {
+ emit_advance_lineno(be, ent->lineno - last_line);
+ last_line = ent->lineno;
+ need_copy = 1;
+ }
+ /*
+ * addresses differ, emit address delta
+ */
+ if (last_vma != ent->addr) {
+ emit_advance_pc(be, ent->addr - last_vma);
+ last_vma = ent->addr;
+ need_copy = 1;
+ }
+ /*
+ * add new row to matrix
+ */
+ if (need_copy)
+ emit_opcode(be, DW_LNS_copy);
+ }
+ }
+}
+
+static void add_debug_line(struct buffer_ext *be,
+ struct debug_entry *ent, size_t nr_entry,
+ unsigned long code_addr)
+{
+ struct debug_line_header * dbg_header;
+ size_t old_size;
+
+ old_size = buffer_ext_size(be);
+
+ buffer_ext_add(be, (void *)&default_debug_line_header,
+ sizeof(default_debug_line_header));
+
+ buffer_ext_add(be, &standard_opcode_length, sizeof(standard_opcode_length));
+
+ // empty directory entry
+ buffer_ext_add(be, (void *)"", 1);
+
+ // empty filename directory
+ buffer_ext_add(be, (void *)"", 1);
+
+ dbg_header = buffer_ext_addr(be) + old_size;
+ dbg_header->prolog_length = (buffer_ext_size(be) - old_size) -
+ offsetof(struct debug_line_header, minimum_instruction_length);
+
+ emit_lineno_info(be, ent, nr_entry, code_addr);
+
+ emit_lne_end_of_sequence(be);
+
+ dbg_header = buffer_ext_addr(be) + old_size;
+ dbg_header->total_length = (buffer_ext_size(be) - old_size) -
+ offsetof(struct debug_line_header, version);
+}
+
+static void
+add_debug_abbrev(struct buffer_ext *be)
+{
+ emit_unsigned_LEB128(be, 1);
+ emit_unsigned_LEB128(be, DW_TAG_compile_unit);
+ emit_unsigned_LEB128(be, DW_CHILDREN_yes);
+ emit_unsigned_LEB128(be, DW_AT_stmt_list);
+ emit_unsigned_LEB128(be, DW_FORM_data4);
+ emit_unsigned_LEB128(be, 0);
+ emit_unsigned_LEB128(be, 0);
+ emit_unsigned_LEB128(be, 0);
+}
+
+static void
+add_compilation_unit(struct buffer_ext *be,
+ size_t offset_debug_line)
+{
+ struct compilation_unit_header *comp_unit_header;
+ size_t old_size = buffer_ext_size(be);
+
+ buffer_ext_add(be, &default_comp_unit_header,
+ sizeof(default_comp_unit_header));
+
+ emit_unsigned_LEB128(be, 1);
+ emit_uword(be, offset_debug_line);
+
+ comp_unit_header = buffer_ext_addr(be) + old_size;
+ comp_unit_header->total_length = (buffer_ext_size(be) - old_size) -
+ offsetof(struct compilation_unit_header, version);
+}
+
+static int
+jit_process_debug_info(uint64_t code_addr,
+ void *debug, int nr_debug_entries,
+ struct buffer_ext *dl,
+ struct buffer_ext *da,
+ struct buffer_ext *di)
+{
+ struct debug_entry *ent = debug;
+ int i;
+
+ for (i = 0; i < nr_debug_entries; i++) {
+ ent->addr = ent->addr - code_addr;
+ ent = debug_entry_next(ent);
+ }
+ add_compilation_unit(di, buffer_ext_size(dl));
+ add_debug_line(dl, debug, nr_debug_entries, 0);
+ add_debug_abbrev(da);
+ if (0) buffer_ext_dump(da, "abbrev");
+
+ return 0;
+}
+
+int
+jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_entries)
+{
+ Elf_Data *d;
+ Elf_Scn *scn;
+ Elf_Shdr *shdr;
+ struct buffer_ext dl, di, da;
+ int ret;
+
+ buffer_ext_init(&dl);
+ buffer_ext_init(&di);
+ buffer_ext_init(&da);
+
+ ret = jit_process_debug_info(code_addr, debug, nr_debug_entries, &dl, &da, &di);
+ if (ret)
+ return -1;
+ /*
+ * setup .debug_line section
+ */
+ scn = elf_newscn(e);
+ if (!scn) {
+ warnx("cannot create section");
+ return -1;
+ }
+
+ d = elf_newdata(scn);
+ if (!d) {
+ warnx("cannot get new data");
+ return -1;
+ }
+
+ d->d_align = 1;
+ d->d_off = 0LL;
+ d->d_buf = buffer_ext_addr(&dl);
+ d->d_type = ELF_T_BYTE;
+ d->d_size = buffer_ext_size(&dl);
+ d->d_version = EV_CURRENT;
+
+ shdr = elf_getshdr(scn);
+ if (!shdr) {
+ warnx("cannot get section header");
+ return -1;
+ }
+
+ shdr->sh_name = 52; /* .debug_line */
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_addr = 0; /* must be zero or == sh_offset -> dynamic object */
+ shdr->sh_flags = 0;
+ shdr->sh_entsize = 0;
+
+ /*
+ * setup .debug_info section
+ */
+ scn = elf_newscn(e);
+ if (!scn) {
+ warnx("cannot create section");
+ return -1;
+ }
+
+ d = elf_newdata(scn);
+ if (!d) {
+ warnx("cannot get new data");
+ return -1;
+ }
+
+ d->d_align = 1;
+ d->d_off = 0LL;
+ d->d_buf = buffer_ext_addr(&di);
+ d->d_type = ELF_T_BYTE;
+ d->d_size = buffer_ext_size(&di);
+ d->d_version = EV_CURRENT;
+
+ shdr = elf_getshdr(scn);
+ if (!shdr) {
+ warnx("cannot get section header");
+ return -1;
+ }
+
+ shdr->sh_name = 64; /* .debug_info */
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_addr = 0; /* must be zero or == sh_offset -> dynamic object */
+ shdr->sh_flags = 0;
+ shdr->sh_entsize = 0;
+
+ /*
+ * setup .debug_abbrev section
+ */
+ scn = elf_newscn(e);
+ if (!scn) {
+ warnx("cannot create section");
+ return -1;
+ }
+
+ d = elf_newdata(scn);
+ if (!d) {
+ warnx("cannot get new data");
+ return -1;
+ }
+
+ d->d_align = 1;
+ d->d_off = 0LL;
+ d->d_buf = buffer_ext_addr(&da);
+ d->d_type = ELF_T_BYTE;
+ d->d_size = buffer_ext_size(&da);
+ d->d_version = EV_CURRENT;
+
+ shdr = elf_getshdr(scn);
+ if (!shdr) {
+ warnx("cannot get section header");
+ return -1;
+ }
+
+ shdr->sh_name = 76; /* .debug_abbrev */
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_addr = 0; /* must be zero or == sh_offset -> dynamic object */
+ shdr->sh_flags = 0;
+ shdr->sh_entsize = 0;
+
+ /*
+ * now we update the ELF image with all the sections
+ */
+ if (elf_update(e, ELF_C_WRITE) < 0) {
+ warnx("elf_update debug failed");
+ return -1;
+ }
+ return 0;
+}
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index f50b7235ecb6..73e38e472ecd 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -23,6 +23,8 @@
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
+#include <api/fs/fs.h>
+#include "asm/bug.h"
/*
* magic2 = "PERFILE2"
@@ -868,6 +870,199 @@ static int write_auxtrace(int fd, struct perf_header *h,
return err;
}
+static int cpu_cache_level__sort(const void *a, const void *b)
+{
+ struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
+ struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
+
+ return cache_a->level - cache_b->level;
+}
+
+static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
+{
+ if (a->level != b->level)
+ return false;
+
+ if (a->line_size != b->line_size)
+ return false;
+
+ if (a->sets != b->sets)
+ return false;
+
+ if (a->ways != b->ways)
+ return false;
+
+ if (strcmp(a->type, b->type))
+ return false;
+
+ if (strcmp(a->size, b->size))
+ return false;
+
+ if (strcmp(a->map, b->map))
+ return false;
+
+ return true;
+}
+
+static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
+{
+ char path[PATH_MAX], file[PATH_MAX];
+ struct stat st;
+ size_t len;
+
+ scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
+ scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
+
+ if (stat(file, &st))
+ return 1;
+
+ scnprintf(file, PATH_MAX, "%s/level", path);
+ if (sysfs__read_int(file, (int *) &cache->level))
+ return -1;
+
+ scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
+ if (sysfs__read_int(file, (int *) &cache->line_size))
+ return -1;
+
+ scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
+ if (sysfs__read_int(file, (int *) &cache->sets))
+ return -1;
+
+ scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
+ if (sysfs__read_int(file, (int *) &cache->ways))
+ return -1;
+
+ scnprintf(file, PATH_MAX, "%s/type", path);
+ if (sysfs__read_str(file, &cache->type, &len))
+ return -1;
+
+ cache->type[len] = 0;
+ cache->type = rtrim(cache->type);
+
+ scnprintf(file, PATH_MAX, "%s/size", path);
+ if (sysfs__read_str(file, &cache->size, &len)) {
+ free(cache->type);
+ return -1;
+ }
+
+ cache->size[len] = 0;
+ cache->size = rtrim(cache->size);
+
+ scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
+ if (sysfs__read_str(file, &cache->map, &len)) {
+ free(cache->map);
+ free(cache->type);
+ return -1;
+ }
+
+ cache->map[len] = 0;
+ cache->map = rtrim(cache->map);
+ return 0;
+}
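For a concrete picture, the sysfs files read above typically contain values like these (illustrative; they vary by CPU):

/sys/devices/system/cpu/cpu0/cache/index0/level                   1
/sys/devices/system/cpu/cpu0/cache/index0/type                    Data
/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size     64
/sys/devices/system/cpu/cpu0/cache/index0/number_of_sets          64
/sys/devices/system/cpu/cpu0/cache/index0/ways_of_associativity   8
/sys/devices/system/cpu/cpu0/cache/index0/size                    32K
/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list         0-1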
+
+static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
+{
+ fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
+}
+
+static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
+{
+ u32 i, cnt = 0;
+ long ncpus;
+ u32 nr, cpu;
+ u16 level;
+
+ ncpus = sysconf(_SC_NPROCESSORS_CONF);
+ if (ncpus < 0)
+ return -1;
+
+ nr = (u32)(ncpus & UINT_MAX);
+
+ for (cpu = 0; cpu < nr; cpu++) {
+ for (level = 0; level < 10; level++) {
+ struct cpu_cache_level c;
+ int err;
+
+ err = cpu_cache_level__read(&c, cpu, level);
+ if (err < 0)
+ return err;
+
+ if (err == 1)
+ break;
+
+ for (i = 0; i < cnt; i++) {
+ if (cpu_cache_level__cmp(&c, &caches[i]))
+ break;
+ }
+
+ if (i == cnt)
+ caches[cnt++] = c;
+ else
+ cpu_cache_level__free(&c);
+
+ if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
+ goto out;
+ }
+ }
+ out:
+ *cntp = cnt;
+ return 0;
+}
+
+#define MAX_CACHES 2000
+
+static int write_cache(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct cpu_cache_level caches[MAX_CACHES];
+ u32 cnt = 0, i, version = 1;
+ int ret;
+
+ ret = build_caches(caches, MAX_CACHES, &cnt);
+ if (ret)
+ goto out;
+
+ qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
+
+ ret = do_write(fd, &version, sizeof(u32));
+ if (ret < 0)
+ goto out;
+
+ ret = do_write(fd, &cnt, sizeof(u32));
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < cnt; i++) {
+ struct cpu_cache_level *c = &caches[i];
+
+ #define _W(v) \
+ ret = do_write(fd, &c->v, sizeof(u32)); \
+ if (ret < 0) \
+ goto out;
+
+ _W(level)
+ _W(line_size)
+ _W(sets)
+ _W(ways)
+ #undef _W
+
+ #define _W(v) \
+ ret = do_write_string(fd, (const char *) c->v); \
+ if (ret < 0) \
+ goto out;
+
+ _W(type)
+ _W(size)
+ _W(map)
+ #undef _W
+ }
+
+out:
+ for (i = 0; i < cnt; i++)
+ cpu_cache_level__free(&caches[i]);
+ return ret;
+}
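Spelled out, each _W(level) above expands to a guarded write, so a version-1 HEADER_CACHE section is: a u32 version, a u32 count, then per cache four u32s (level, line_size, sets, ways) followed by three strings (type, size, map) written by do_write_string:

/* what _W(level) expands to */
ret = do_write(fd, &c->level, sizeof(u32));
if (ret < 0)
	goto out;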
+
static int write_stat(int fd __maybe_unused,
struct perf_header *h __maybe_unused,
struct perf_evlist *evlist __maybe_unused)
@@ -1172,6 +1367,18 @@ static void print_stat(struct perf_header *ph __maybe_unused,
fprintf(fp, "# contains stat data\n");
}
+static void print_cache(struct perf_header *ph __maybe_unused,
+ int fd __maybe_unused, FILE *fp __maybe_unused)
+{
+ int i;
+
+ fprintf(fp, "# CPU cache info:\n");
+ for (i = 0; i < ph->env.caches_cnt; i++) {
+ fprintf(fp, "# ");
+ cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
+ }
+}
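Given the cpu_cache_level__fprintf format above, the header dump (e.g. perf report --header) renders roughly like this (example values):

# CPU cache info:
# L1 Data                 32K [0-1]
# L1 Instruction          32K [0-1]
# L2 Unified             256K [0-1]
# L3 Unified            8192K [0-7]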
+
static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
FILE *fp)
{
@@ -1920,6 +2127,68 @@ static int process_auxtrace(struct perf_file_section *section,
return err;
}
+static int process_cache(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph __maybe_unused, int fd __maybe_unused,
+ void *data __maybe_unused)
+{
+ struct cpu_cache_level *caches;
+ u32 cnt, i, version;
+
+ if (readn(fd, &version, sizeof(version)) != sizeof(version))
+ return -1;
+
+ if (ph->needs_swap)
+ version = bswap_32(version);
+
+ if (version != 1)
+ return -1;
+
+ if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
+ return -1;
+
+ if (ph->needs_swap)
+ cnt = bswap_32(cnt);
+
+ caches = zalloc(sizeof(*caches) * cnt);
+ if (!caches)
+ return -1;
+
+ for (i = 0; i < cnt; i++) {
+ struct cpu_cache_level c;
+
+ #define _R(v) \
+ if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
+ goto out_free_caches; \
+ if (ph->needs_swap) \
+ c.v = bswap_32(c.v); \
+
+ _R(level)
+ _R(line_size)
+ _R(sets)
+ _R(ways)
+ #undef _R
+
+ #define _R(v) \
+ c.v = do_read_string(fd, ph); \
+ if (!c.v) \
+ goto out_free_caches;
+
+ _R(type)
+ _R(size)
+ _R(map)
+ #undef _R
+
+ caches[i] = c;
+ }
+
+ ph->env.caches = caches;
+ ph->env.caches_cnt = cnt;
+ return 0;
+out_free_caches:
+ free(caches);
+ return -1;
+}
+
struct feature_ops {
int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
void (*print)(struct perf_header *h, int fd, FILE *fp);
@@ -1962,6 +2231,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPP(HEADER_GROUP_DESC, group_desc),
FEAT_OPP(HEADER_AUXTRACE, auxtrace),
FEAT_OPA(HEADER_STAT, stat),
+ FEAT_OPF(HEADER_CACHE, cache),
};
struct header_print_data {
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index cff9892452ee..3d87ca823c0a 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -32,6 +32,7 @@ enum {
HEADER_GROUP_DESC,
HEADER_AUXTRACE,
HEADER_STAT,
+ HEADER_CACHE,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
diff --git a/tools/perf/util/help-unknown-cmd.c b/tools/perf/util/help-unknown-cmd.c
index dc1e41c9b054..43a98a4dc1e1 100644
--- a/tools/perf/util/help-unknown-cmd.c
+++ b/tools/perf/util/help-unknown-cmd.c
@@ -6,7 +6,8 @@
static int autocorrect;
static struct cmdnames aliases;
-static int perf_unknown_cmd_config(const char *var, const char *value, void *cb)
+static int perf_unknown_cmd_config(const char *var, const char *value,
+ void *cb __maybe_unused)
{
if (!strcmp(var, "help.autocorrect"))
autocorrect = perf_config_int(var,value);
@@ -14,7 +15,7 @@ static int perf_unknown_cmd_config(const char *var, const char *value, void *cb)
if (!prefixcmp(var, "alias."))
add_cmdname(&aliases, var + 6, strlen(var + 6));
- return perf_default_config(var, value, cb);
+ return 0;
}
static int levenshtein_compare(const void *p1, const void *p2)
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 68a7612019dc..290b3cbf6877 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -179,6 +179,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
if (h->transaction)
hists__new_col_len(hists, HISTC_TRANSACTION,
hist_entry__transaction_len());
+
+ if (h->trace_output)
+ hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
@@ -245,6 +248,8 @@ static void he_stat__decay(struct he_stat *he_stat)
/* XXX need decay for weight too? */
}
+static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
+
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
u64 prev_period = he->stat.period;
@@ -260,21 +265,45 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
diff = prev_period - he->stat.period;
- hists->stats.total_period -= diff;
- if (!he->filtered)
- hists->stats.total_non_filtered_period -= diff;
+ if (!he->depth) {
+ hists->stats.total_period -= diff;
+ if (!he->filtered)
+ hists->stats.total_non_filtered_period -= diff;
+ }
+
+ if (!he->leaf) {
+ struct hist_entry *child;
+ struct rb_node *node = rb_first(&he->hroot_out);
+ while (node) {
+ child = rb_entry(node, struct hist_entry, rb_node);
+ node = rb_next(node);
+
+ if (hists__decay_entry(hists, child))
+ hists__delete_entry(hists, child);
+ }
+ }
return he->stat.period == 0;
}
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
- rb_erase(&he->rb_node, &hists->entries);
+ struct rb_root *root_in;
+ struct rb_root *root_out;
- if (sort__need_collapse)
- rb_erase(&he->rb_node_in, &hists->entries_collapsed);
- else
- rb_erase(&he->rb_node_in, hists->entries_in);
+ if (he->parent_he) {
+ root_in = &he->parent_he->hroot_in;
+ root_out = &he->parent_he->hroot_out;
+ } else {
+ if (sort__need_collapse)
+ root_in = &hists->entries_collapsed;
+ else
+ root_in = hists->entries_in;
+ root_out = &hists->entries;
+ }
+
+ rb_erase(&he->rb_node_in, root_in);
+ rb_erase(&he->rb_node, root_out);
--hists->nr_entries;
if (!he->filtered)
@@ -393,6 +422,9 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
}
INIT_LIST_HEAD(&he->pairs.node);
thread__get(he->thread);
+
+ if (!symbol_conf.report_hierarchy)
+ he->leaf = true;
}
return he;
@@ -405,6 +437,16 @@ static u8 symbol__parent_filter(const struct symbol *parent)
return 0;
}
+static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
+{
+ if (!symbol_conf.use_callchain)
+ return;
+
+ he->hists->callchain_period += period;
+ if (!he->filtered)
+ he->hists->callchain_non_filtered_period += period;
+}
+
static struct hist_entry *hists__findnew_entry(struct hists *hists,
struct hist_entry *entry,
struct addr_location *al,
@@ -432,8 +474,10 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
cmp = hist_entry__cmp(he, entry);
if (!cmp) {
- if (sample_self)
+ if (sample_self) {
he_stat__add_period(&he->stat, period, weight);
+ hist_entry__add_callchain_period(he, period);
+ }
if (symbol_conf.cumulate_callchain)
he_stat__add_period(he->stat_acc, period, weight);
@@ -466,6 +510,8 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
if (!he)
return NULL;
+ if (sample_self)
+ hist_entry__add_callchain_period(he, period);
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
@@ -951,10 +997,15 @@ out:
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
+ struct hists *hists = left->hists;
struct perf_hpp_fmt *fmt;
int64_t cmp = 0;
- perf_hpp__for_each_sort_list(fmt) {
+ hists__for_each_sort_list(hists, fmt) {
+ if (perf_hpp__is_dynamic_entry(fmt) &&
+ !perf_hpp__defined_dynamic_entry(fmt, hists))
+ continue;
+
cmp = fmt->cmp(fmt, left, right);
if (cmp)
break;
@@ -966,10 +1017,15 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
+ struct hists *hists = left->hists;
struct perf_hpp_fmt *fmt;
int64_t cmp = 0;
- perf_hpp__for_each_sort_list(fmt) {
+ hists__for_each_sort_list(hists, fmt) {
+ if (perf_hpp__is_dynamic_entry(fmt) &&
+ !perf_hpp__defined_dynamic_entry(fmt, hists))
+ continue;
+
cmp = fmt->collapse(fmt, left, right);
if (cmp)
break;
@@ -1006,17 +1062,250 @@ void hist_entry__delete(struct hist_entry *he)
}
/*
+ * If this is not the last column, then we need to pad it according to the
+ * pre-calculated max length for this column, otherwise don't bother adding
+ * spaces because that would break viewing this with, for instance, 'less':
+ * it would show tons of trailing spaces when a long C++ demangled method
+ * name is sampled.
+ */
+int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
+ struct perf_hpp_fmt *fmt, int printed)
+{
+ if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
+ const int width = fmt->width(fmt, hpp, hists_to_evsel(he->hists));
+ if (printed < width) {
+ advance_hpp(hpp, printed);
+ printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
+ }
+ }
+
+ return printed;
+}
+
+/*
* collapse the histogram
*/
-bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
- struct rb_root *root, struct hist_entry *he)
+static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
+static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
+ enum hist_filter type);
+
+typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
+
+static bool check_thread_entry(struct perf_hpp_fmt *fmt)
+{
+ return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
+}
+
+static void hist_entry__check_and_remove_filter(struct hist_entry *he,
+ enum hist_filter type,
+ fmt_chk_fn check)
+{
+ struct perf_hpp_fmt *fmt;
+ bool type_match = false;
+ struct hist_entry *parent = he->parent_he;
+
+ switch (type) {
+ case HIST_FILTER__THREAD:
+ if (symbol_conf.comm_list == NULL &&
+ symbol_conf.pid_list == NULL &&
+ symbol_conf.tid_list == NULL)
+ return;
+ break;
+ case HIST_FILTER__DSO:
+ if (symbol_conf.dso_list == NULL)
+ return;
+ break;
+ case HIST_FILTER__SYMBOL:
+ if (symbol_conf.sym_list == NULL)
+ return;
+ break;
+ case HIST_FILTER__PARENT:
+ case HIST_FILTER__GUEST:
+ case HIST_FILTER__HOST:
+ case HIST_FILTER__SOCKET:
+ default:
+ return;
+ }
+
+ /* if it's filtered by own fmt, it has to have filter bits */
+ perf_hpp_list__for_each_format(he->hpp_list, fmt) {
+ if (check(fmt)) {
+ type_match = true;
+ break;
+ }
+ }
+
+ if (type_match) {
+ /*
+ * If the filter applies to an entry at the current level,
+ * propagate the filter marker to its parents. The marker
+ * bit was already set by default, so it only needs to be
+ * cleared on non-filtered entries.
+ */
+ if (!(he->filtered & (1 << type))) {
+ while (parent) {
+ parent->filtered &= ~(1 << type);
+ parent = parent->parent_he;
+ }
+ }
+ } else {
+ /*
+ * If the current entry doesn't have matching formats, set
+ * the filter marker on upper level entries. It will be
+ * cleared if its lower level entries are not filtered.
+ *
+ * Lower-level entries inherit the parent's filter bit so
+ * that lower level entries of a non-filtered entry won't
+ * set the filter marker.
+ */
+ if (parent == NULL)
+ he->filtered |= (1 << type);
+ else
+ he->filtered |= (parent->filtered & (1 << type));
+ }
+}
+
+static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
+{
+ hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
+ check_thread_entry);
+
+ hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
+ perf_hpp__is_dso_entry);
+
+ hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
+ perf_hpp__is_sym_entry);
+
+ hists__apply_filters(he->hists, he);
+}
+
+static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
+ struct rb_root *root,
+ struct hist_entry *he,
+ struct hist_entry *parent_he,
+ struct perf_hpp_list *hpp_list)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter, *new;
+ struct perf_hpp_fmt *fmt;
+ int64_t cmp;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node_in);
+
+ cmp = 0;
+ perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
+ cmp = fmt->collapse(fmt, iter, he);
+ if (cmp)
+ break;
+ }
+
+ if (!cmp) {
+ he_stat__add_stat(&iter->stat, &he->stat);
+ return iter;
+ }
+
+ if (cmp < 0)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+
+ new = hist_entry__new(he, true);
+ if (new == NULL)
+ return NULL;
+
+ hists->nr_entries++;
+
+ /* save related format list for output */
+ new->hpp_list = hpp_list;
+ new->parent_he = parent_he;
+
+ hist_entry__apply_hierarchy_filters(new);
+
+ /* some fields are now passed to 'new' */
+ perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
+ if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
+ he->trace_output = NULL;
+ else
+ new->trace_output = NULL;
+
+ if (perf_hpp__is_srcline_entry(fmt))
+ he->srcline = NULL;
+ else
+ new->srcline = NULL;
+
+ if (perf_hpp__is_srcfile_entry(fmt))
+ he->srcfile = NULL;
+ else
+ new->srcfile = NULL;
+ }
+
+ rb_link_node(&new->rb_node_in, parent, p);
+ rb_insert_color(&new->rb_node_in, root);
+ return new;
+}
+
+static int hists__hierarchy_insert_entry(struct hists *hists,
+ struct rb_root *root,
+ struct hist_entry *he)
+{
+ struct perf_hpp_list_node *node;
+ struct hist_entry *new_he = NULL;
+ struct hist_entry *parent = NULL;
+ int depth = 0;
+ int ret = 0;
+
+ list_for_each_entry(node, &hists->hpp_formats, list) {
+ /* skip period (overhead) and elided columns */
+ if (node->level == 0 || node->skip)
+ continue;
+
+ /* insert copy of 'he' for each fmt into the hierarchy */
+ new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
+ if (new_he == NULL) {
+ ret = -1;
+ break;
+ }
+
+ root = &new_he->hroot_in;
+ new_he->depth = depth++;
+ parent = new_he;
+ }
+
+ if (new_he) {
+ new_he->leaf = true;
+
+ if (symbol_conf.use_callchain) {
+ callchain_cursor_reset(&callchain_cursor);
+ if (callchain_merge(&callchain_cursor,
+ new_he->callchain,
+ he->callchain) < 0)
+ ret = -1;
+ }
+ }
+
+ /* 'he' is no longer used */
+ hist_entry__delete(he);
+
+ /* return 0 (or -1) since it already applied filters */
+ return ret;
+}
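To make the fan-out concrete: with, say, three sort levels (comm, dso, sym), one input entry becomes a chain of per-level copies, each linked into its parent's hroot_in -- e.g.:

depth 0 (comm)  firefox
  depth 1 (dso)   libxul.so        in firefox's hroot_in
    depth 2 (sym)   js::RunScript  leaf; ends up owning the merged callchain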
+
+int hists__collapse_insert_entry(struct hists *hists, struct rb_root *root,
+ struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
int64_t cmp;
+ if (symbol_conf.report_hierarchy)
+ return hists__hierarchy_insert_entry(hists, root, he);
+
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node_in);
@@ -1024,18 +1313,21 @@ bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
cmp = hist_entry__collapse(iter, he);
if (!cmp) {
+ int ret = 0;
+
he_stat__add_stat(&iter->stat, &he->stat);
if (symbol_conf.cumulate_callchain)
he_stat__add_stat(iter->stat_acc, he->stat_acc);
if (symbol_conf.use_callchain) {
callchain_cursor_reset(&callchain_cursor);
- callchain_merge(&callchain_cursor,
- iter->callchain,
- he->callchain);
+ if (callchain_merge(&callchain_cursor,
+ iter->callchain,
+ he->callchain) < 0)
+ ret = -1;
}
hist_entry__delete(he);
- return false;
+ return ret;
}
if (cmp < 0)
@@ -1047,7 +1339,7 @@ bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, root);
- return true;
+ return 1;
}
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
@@ -1073,14 +1365,15 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
hists__filter_entry_by_socket(hists, he);
}
-void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
+int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
struct rb_root *root;
struct rb_node *next;
struct hist_entry *n;
+ int ret;
if (!sort__need_collapse)
- return;
+ return 0;
hists->nr_entries = 0;
@@ -1095,7 +1388,11 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
next = rb_next(&n->rb_node_in);
rb_erase(&n->rb_node_in, root);
- if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
+ ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
+ if (ret < 0)
+ return -1;
+
+ if (ret) {
/*
* If it wasn't combined with one of the entries already
* collapsed, we need to apply the filters that may have
@@ -1106,14 +1403,16 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
if (prog)
ui_progress__update(prog, 1);
}
+ return 0;
}
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
+ struct hists *hists = a->hists;
struct perf_hpp_fmt *fmt;
int64_t cmp = 0;
- perf_hpp__for_each_sort_list(fmt) {
+ hists__for_each_sort_list(hists, fmt) {
if (perf_hpp__should_skip(fmt, a->hists))
continue;
@@ -1154,6 +1453,113 @@ void hists__inc_stats(struct hists *hists, struct hist_entry *h)
hists->stats.total_period += h->stat.period;
}
+static void hierarchy_recalc_total_periods(struct hists *hists)
+{
+ struct rb_node *node;
+ struct hist_entry *he;
+
+ node = rb_first(&hists->entries);
+
+ hists->stats.total_period = 0;
+ hists->stats.total_non_filtered_period = 0;
+
+ /*
+ * recalculate the total period using top-level entries only,
+ * since lower level entries only see non-filtered entries
+ * while upper level entries hold the sum of both.
+ */
+ while (node) {
+ he = rb_entry(node, struct hist_entry, rb_node);
+ node = rb_next(node);
+
+ hists->stats.total_period += he->stat.period;
+ if (!he->filtered)
+ hists->stats.total_non_filtered_period += he->stat.period;
+ }
+}
+
+static void hierarchy_insert_output_entry(struct rb_root *root,
+ struct hist_entry *he)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+ struct perf_hpp_fmt *fmt;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node);
+
+ if (hist_entry__sort(he, iter) > 0)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, root);
+
+ /* update column width of dynamic entry */
+ perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
+ if (perf_hpp__is_dynamic_entry(fmt))
+ fmt->sort(fmt, he, NULL);
+ }
+}
+
+static void hists__hierarchy_output_resort(struct hists *hists,
+ struct ui_progress *prog,
+ struct rb_root *root_in,
+ struct rb_root *root_out,
+ u64 min_callchain_hits,
+ bool use_callchain)
+{
+ struct rb_node *node;
+ struct hist_entry *he;
+
+ *root_out = RB_ROOT;
+ node = rb_first(root_in);
+
+ while (node) {
+ he = rb_entry(node, struct hist_entry, rb_node_in);
+ node = rb_next(node);
+
+ hierarchy_insert_output_entry(root_out, he);
+
+ if (prog)
+ ui_progress__update(prog, 1);
+
+ if (!he->leaf) {
+ hists__hierarchy_output_resort(hists, prog,
+ &he->hroot_in,
+ &he->hroot_out,
+ min_callchain_hits,
+ use_callchain);
+ hists->nr_entries++;
+ if (!he->filtered) {
+ hists->nr_non_filtered_entries++;
+ hists__calc_col_len(hists, he);
+ }
+
+ continue;
+ }
+
+ if (!use_callchain)
+ continue;
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL) {
+ u64 total = he->stat.period;
+
+ if (symbol_conf.cumulate_callchain)
+ total = he->stat_acc->period;
+
+ min_callchain_hits = total * (callchain_param.min_percent / 100);
+ }
+
+ callchain_param.sort(&he->sorted_chain, he->callchain,
+ min_callchain_hits, &callchain_param);
+ }
+}
+
static void __hists__insert_output_entry(struct rb_root *entries,
struct hist_entry *he,
u64 min_callchain_hits,
@@ -1162,10 +1568,20 @@ static void __hists__insert_output_entry(struct rb_root *entries,
struct rb_node **p = &entries->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
+ struct perf_hpp_fmt *fmt;
+
+ if (use_callchain) {
+ if (callchain_param.mode == CHAIN_GRAPH_REL) {
+ u64 total = he->stat.period;
+
+ if (symbol_conf.cumulate_callchain)
+ total = he->stat_acc->period;
- if (use_callchain)
+ min_callchain_hits = total * (callchain_param.min_percent / 100);
+ }
callchain_param.sort(&he->sorted_chain, he->callchain,
min_callchain_hits, &callchain_param);
+ }
while (*p != NULL) {
parent = *p;
@@ -1179,23 +1595,41 @@ static void __hists__insert_output_entry(struct rb_root *entries,
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, entries);
+
+ perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
+ if (perf_hpp__is_dynamic_entry(fmt) &&
+ perf_hpp__defined_dynamic_entry(fmt, he->hists))
+ fmt->sort(fmt, he, NULL); /* update column width */
+ }
}
-void hists__output_resort(struct hists *hists, struct ui_progress *prog)
+static void output_resort(struct hists *hists, struct ui_progress *prog,
+ bool use_callchain)
{
struct rb_root *root;
struct rb_node *next;
struct hist_entry *n;
+ u64 callchain_total;
u64 min_callchain_hits;
- struct perf_evsel *evsel = hists_to_evsel(hists);
- bool use_callchain;
- if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
- use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
- else
- use_callchain = symbol_conf.use_callchain;
+ callchain_total = hists->callchain_period;
+ if (symbol_conf.filter_relative)
+ callchain_total = hists->callchain_non_filtered_period;
- min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
+ min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
+
+ hists__reset_stats(hists);
+ hists__reset_col_len(hists);
+
+ if (symbol_conf.report_hierarchy) {
+ hists__hierarchy_output_resort(hists, prog,
+ &hists->entries_collapsed,
+ &hists->entries,
+ min_callchain_hits,
+ use_callchain);
+ hierarchy_recalc_total_periods(hists);
+ return;
+ }
if (sort__need_collapse)
root = &hists->entries_collapsed;
@@ -1205,9 +1639,6 @@ void hists__output_resort(struct hists *hists, struct ui_progress *prog)
next = rb_first(root);
hists->entries = RB_ROOT;
- hists__reset_stats(hists);
- hists__reset_col_len(hists);
-
while (next) {
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
@@ -1223,15 +1654,136 @@ void hists__output_resort(struct hists *hists, struct ui_progress *prog)
}
}
+void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
+{
+ bool use_callchain;
+
+ if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
+ use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
+ else
+ use_callchain = symbol_conf.use_callchain;
+
+ output_resort(evsel__hists(evsel), prog, use_callchain);
+}
+
+void hists__output_resort(struct hists *hists, struct ui_progress *prog)
+{
+ output_resort(hists, prog, symbol_conf.use_callchain);
+}
+
+static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
+{
+ if (he->leaf || hmd == HMD_FORCE_SIBLING)
+ return false;
+
+ if (he->unfolded || hmd == HMD_FORCE_CHILD)
+ return true;
+
+ return false;
+}
+
+struct rb_node *rb_hierarchy_last(struct rb_node *node)
+{
+ struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
+
+ while (can_goto_child(he, HMD_NORMAL)) {
+ node = rb_last(&he->hroot_out);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ }
+ return node;
+}
+
+struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
+{
+ struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
+
+ if (can_goto_child(he, hmd))
+ node = rb_first(&he->hroot_out);
+ else
+ node = rb_next(node);
+
+ while (node == NULL) {
+ he = he->parent_he;
+ if (he == NULL)
+ break;
+
+ node = rb_next(&he->rb_node);
+ }
+ return node;
+}
+
+struct rb_node *rb_hierarchy_prev(struct rb_node *node)
+{
+ struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
+
+ node = rb_prev(node);
+ if (node)
+ return rb_hierarchy_last(node);
+
+ he = he->parent_he;
+ if (he == NULL)
+ return NULL;
+
+ return &he->rb_node;
+}
+
+bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
+{
+ struct rb_node *node;
+ struct hist_entry *child;
+ float percent;
+
+ if (he->leaf)
+ return false;
+
+ node = rb_first(&he->hroot_out);
+ child = rb_entry(node, struct hist_entry, rb_node);
+
+ while (node && child->filtered) {
+ node = rb_next(node);
+ child = rb_entry(node, struct hist_entry, rb_node);
+ }
+
+ if (node)
+ percent = hist_entry__get_percent_limit(child);
+ else
+ percent = 0;
+
+ return node && percent >= limit;
+}
+
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
enum hist_filter filter)
{
h->filtered &= ~(1 << filter);
+
+ if (symbol_conf.report_hierarchy) {
+ struct hist_entry *parent = h->parent_he;
+
+ while (parent) {
+ he_stat__add_stat(&parent->stat, &h->stat);
+
+ parent->filtered &= ~(1 << filter);
+
+ if (parent->filtered)
+ goto next;
+
+ /* force fold unfiltered entry for simplicity */
+ parent->unfolded = false;
+ parent->has_no_entry = false;
+ parent->row_offset = 0;
+ parent->nr_rows = 0;
+next:
+ parent = parent->parent_he;
+ }
+ }
+
if (h->filtered)
return;
/* force fold unfiltered entry for simplicity */
h->unfolded = false;
+ h->has_no_entry = false;
h->row_offset = 0;
h->nr_rows = 0;
@@ -1254,28 +1806,6 @@ static bool hists__filter_entry_by_dso(struct hists *hists,
return false;
}
-void hists__filter_by_dso(struct hists *hists)
-{
- struct rb_node *nd;
-
- hists->stats.nr_non_filtered_samples = 0;
-
- hists__reset_filter_stats(hists);
- hists__reset_col_len(hists);
-
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
- struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
-
- if (symbol_conf.exclude_other && !h->parent)
- continue;
-
- if (hists__filter_entry_by_dso(hists, h))
- continue;
-
- hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
- }
-}
-
static bool hists__filter_entry_by_thread(struct hists *hists,
struct hist_entry *he)
{
@@ -1288,25 +1818,6 @@ static bool hists__filter_entry_by_thread(struct hists *hists,
return false;
}
-void hists__filter_by_thread(struct hists *hists)
-{
- struct rb_node *nd;
-
- hists->stats.nr_non_filtered_samples = 0;
-
- hists__reset_filter_stats(hists);
- hists__reset_col_len(hists);
-
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
- struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
-
- if (hists__filter_entry_by_thread(hists, h))
- continue;
-
- hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
- }
-}
-
static bool hists__filter_entry_by_symbol(struct hists *hists,
struct hist_entry *he)
{
@@ -1320,7 +1831,21 @@ static bool hists__filter_entry_by_symbol(struct hists *hists,
return false;
}
-void hists__filter_by_symbol(struct hists *hists)
+static bool hists__filter_entry_by_socket(struct hists *hists,
+ struct hist_entry *he)
+{
+ if ((hists->socket_filter > -1) &&
+ (he->socket != hists->socket_filter)) {
+ he->filtered |= (1 << HIST_FILTER__SOCKET);
+ return true;
+ }
+
+ return false;
+}
+
+typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
+
+static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
struct rb_node *nd;
@@ -1332,42 +1857,155 @@ void hists__filter_by_symbol(struct hists *hists)
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
- if (hists__filter_entry_by_symbol(hists, h))
+ if (filter(hists, h))
continue;
- hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
+ hists__remove_entry_filter(hists, h, type);
}
}
-static bool hists__filter_entry_by_socket(struct hists *hists,
- struct hist_entry *he)
+static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
{
- if ((hists->socket_filter > -1) &&
- (he->socket != hists->socket_filter)) {
- he->filtered |= (1 << HIST_FILTER__SOCKET);
- return true;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+ struct rb_root new_root = RB_ROOT;
+ struct rb_node *nd;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node);
+
+ if (hist_entry__sort(he, iter) > 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
}
- return false;
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, root);
+
+ if (he->leaf || he->filtered)
+ return;
+
+ nd = rb_first(&he->hroot_out);
+ while (nd) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ nd = rb_next(nd);
+ rb_erase(&h->rb_node, &he->hroot_out);
+
+ resort_filtered_entry(&new_root, h);
+ }
+
+ he->hroot_out = new_root;
}
-void hists__filter_by_socket(struct hists *hists)
+static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
struct rb_node *nd;
+ struct rb_root new_root = RB_ROOT;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ nd = rb_first(&hists->entries);
+ while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+ int ret;
- if (hists__filter_entry_by_socket(hists, h))
- continue;
+ ret = hist_entry__filter(h, type, arg);
- hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
+ /*
+ * case 1. non-matching type
+ * zero out the period, set filter marker and move to child
+ */
+ if (ret < 0) {
+ memset(&h->stat, 0, sizeof(h->stat));
+ h->filtered |= (1 << type);
+
+ nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
+ }
+ /*
+ * case 2. matched type (filter out)
+ * set filter marker and move to next
+ */
+ else if (ret == 1) {
+ h->filtered |= (1 << type);
+
+ nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
+ }
+ /*
+ * case 3. ok (not filtered)
+ * add period to hists and parents, erase the filter marker
+ * and move to next sibling
+ */
+ else {
+ hists__remove_entry_filter(hists, h, type);
+
+ nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
+ }
+ }
+
+ hierarchy_recalc_total_periods(hists);
+
+ /*
+ * re-sort the output after applying a new filter, since a filter at a
+ * lower hierarchy level can change periods at an upper level.
+ */
+ nd = rb_first(&hists->entries);
+ while (nd) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ nd = rb_next(nd);
+ rb_erase(&h->rb_node, &hists->entries);
+
+ resort_filtered_entry(&new_root, h);
}
+
+ hists->entries = new_root;
+}
+
+void hists__filter_by_thread(struct hists *hists)
+{
+ if (symbol_conf.report_hierarchy)
+ hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
+ hists->thread_filter);
+ else
+ hists__filter_by_type(hists, HIST_FILTER__THREAD,
+ hists__filter_entry_by_thread);
+}
+
+void hists__filter_by_dso(struct hists *hists)
+{
+ if (symbol_conf.report_hierarchy)
+ hists__filter_hierarchy(hists, HIST_FILTER__DSO,
+ hists->dso_filter);
+ else
+ hists__filter_by_type(hists, HIST_FILTER__DSO,
+ hists__filter_entry_by_dso);
+}
+
+void hists__filter_by_symbol(struct hists *hists)
+{
+ if (symbol_conf.report_hierarchy)
+ hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
+ hists->symbol_filter_str);
+ else
+ hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
+ hists__filter_entry_by_symbol);
+}
+
+void hists__filter_by_socket(struct hists *hists)
+{
+ if (symbol_conf.report_hierarchy)
+ hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
+ &hists->socket_filter);
+ else
+ hists__filter_by_type(hists, HIST_FILTER__SOCKET,
+ hists__filter_entry_by_socket);
}
void events_stats__inc(struct events_stats *stats, u32 type)
@@ -1585,7 +2223,7 @@ int perf_hist_config(const char *var, const char *value)
return 0;
}
-int __hists__init(struct hists *hists)
+int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
memset(hists, 0, sizeof(*hists));
hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
@@ -1594,6 +2232,8 @@ int __hists__init(struct hists *hists)
hists->entries = RB_ROOT;
pthread_mutex_init(&hists->lock, NULL);
hists->socket_filter = -1;
+ hists->hpp_list = hpp_list;
+ INIT_LIST_HEAD(&hists->hpp_formats);
return 0;
}
@@ -1622,15 +2262,26 @@ static void hists__delete_all_entries(struct hists *hists)
static void hists_evsel__exit(struct perf_evsel *evsel)
{
struct hists *hists = evsel__hists(evsel);
+ struct perf_hpp_fmt *fmt, *pos;
+ struct perf_hpp_list_node *node, *tmp;
hists__delete_all_entries(hists);
+
+ list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
+ perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
+ list_del(&fmt->list);
+ free(fmt);
+ }
+ list_del(&node->list);
+ free(node);
+ }
}
static int hists_evsel__init(struct perf_evsel *evsel)
{
struct hists *hists = evsel__hists(evsel);
- __hists__init(hists);
+ __hists__init(hists, &perf_hpp_list);
return 0;
}
@@ -1649,3 +2300,9 @@ int hists__init(void)
return err;
}
+
+void perf_hpp_list__init(struct perf_hpp_list *list)
+{
+ INIT_LIST_HEAD(&list->fields);
+ INIT_LIST_HEAD(&list->sorts);
+}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index d4ec4822a103..ead18c82294f 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -66,6 +66,8 @@ struct hists {
struct rb_root entries_collapsed;
u64 nr_entries;
u64 nr_non_filtered_entries;
+ u64 callchain_period;
+ u64 callchain_non_filtered_period;
struct thread *thread_filter;
const struct dso *dso_filter;
const char *uid_filter_str;
@@ -75,6 +77,9 @@ struct hists {
u64 event_stream;
u16 col_len[HISTC_NR_COLS];
int socket_filter;
+ struct perf_hpp_list *hpp_list;
+ struct list_head hpp_formats;
+ int nr_hpp_node;
};
struct hist_entry_iter;
@@ -121,15 +126,21 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
int max_stack_depth, void *arg);
+struct perf_hpp;
+struct perf_hpp_fmt;
+
int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
int hist_entry__transaction_len(void);
int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
struct hists *hists);
+int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
+ struct perf_hpp_fmt *fmt, int printed);
void hist_entry__delete(struct hist_entry *he);
+void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog);
void hists__output_resort(struct hists *hists, struct ui_progress *prog);
-void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
+int hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
void hists__delete_entries(struct hists *hists);
@@ -185,10 +196,10 @@ static inline struct hists *evsel__hists(struct perf_evsel *evsel)
}
int hists__init(void);
-int __hists__init(struct hists *hists);
+int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list);
struct rb_root *hists__get_rotate_entries_in(struct hists *hists);
-bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
+int hists__collapse_insert_entry(struct hists *hists,
struct rb_root *root, struct hist_entry *he);
struct perf_hpp {
@@ -214,28 +225,64 @@ struct perf_hpp_fmt {
struct hist_entry *a, struct hist_entry *b);
int64_t (*sort)(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b);
+ bool (*equal)(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b);
+ void (*free)(struct perf_hpp_fmt *fmt);
struct list_head list;
struct list_head sort_list;
bool elide;
int len;
int user_len;
+ int idx;
+ int level;
+};
+
+struct perf_hpp_list {
+ struct list_head fields;
+ struct list_head sorts;
};
-extern struct list_head perf_hpp__list;
-extern struct list_head perf_hpp__sort_list;
+extern struct perf_hpp_list perf_hpp_list;
+
+struct perf_hpp_list_node {
+ struct list_head list;
+ struct perf_hpp_list hpp;
+ int level;
+ bool skip;
+};
+
+void perf_hpp_list__column_register(struct perf_hpp_list *list,
+ struct perf_hpp_fmt *format);
+void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
+ struct perf_hpp_fmt *format);
+
+static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
+{
+ perf_hpp_list__column_register(&perf_hpp_list, format);
+}
+
+static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
+{
+ perf_hpp_list__register_sort_field(&perf_hpp_list, format);
+}
+
+#define perf_hpp_list__for_each_format(_list, format) \
+ list_for_each_entry(format, &(_list)->fields, list)
-#define perf_hpp__for_each_format(format) \
- list_for_each_entry(format, &perf_hpp__list, list)
+#define perf_hpp_list__for_each_format_safe(_list, format, tmp) \
+ list_for_each_entry_safe(format, tmp, &(_list)->fields, list)
-#define perf_hpp__for_each_format_safe(format, tmp) \
- list_for_each_entry_safe(format, tmp, &perf_hpp__list, list)
+#define perf_hpp_list__for_each_sort_list(_list, format) \
+ list_for_each_entry(format, &(_list)->sorts, sort_list)
-#define perf_hpp__for_each_sort_list(format) \
- list_for_each_entry(format, &perf_hpp__sort_list, sort_list)
+#define perf_hpp_list__for_each_sort_list_safe(_list, format, tmp) \
+ list_for_each_entry_safe(format, tmp, &(_list)->sorts, sort_list)
-#define perf_hpp__for_each_sort_list_safe(format, tmp) \
- list_for_each_entry_safe(format, tmp, &perf_hpp__sort_list, sort_list)
+#define hists__for_each_format(hists, format) \
+ perf_hpp_list__for_each_format((hists)->hpp_list, format)
+
+#define hists__for_each_sort_list(hists, format) \
+ perf_hpp_list__for_each_sort_list((hists)->hpp_list, format)
extern struct perf_hpp_fmt perf_hpp__format[];
@@ -254,21 +301,29 @@ enum {
};
void perf_hpp__init(void);
-void perf_hpp__column_register(struct perf_hpp_fmt *format);
void perf_hpp__column_unregister(struct perf_hpp_fmt *format);
-void perf_hpp__column_enable(unsigned col);
-void perf_hpp__column_disable(unsigned col);
void perf_hpp__cancel_cumulate(void);
+void perf_hpp__setup_output_field(struct perf_hpp_list *list);
+void perf_hpp__reset_output_field(struct perf_hpp_list *list);
+void perf_hpp__append_sort_keys(struct perf_hpp_list *list);
+int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
+ struct perf_evlist *evlist);
-void perf_hpp__register_sort_field(struct perf_hpp_fmt *format);
-void perf_hpp__setup_output_field(void);
-void perf_hpp__reset_output_field(void);
-void perf_hpp__append_sort_keys(void);
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format);
-bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b);
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *format);
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists);
+bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt);
+bool perf_hpp__is_srcline_entry(struct perf_hpp_fmt *fmt);
+bool perf_hpp__is_srcfile_entry(struct perf_hpp_fmt *fmt);
+bool perf_hpp__is_thread_entry(struct perf_hpp_fmt *fmt);
+bool perf_hpp__is_comm_entry(struct perf_hpp_fmt *fmt);
+bool perf_hpp__is_dso_entry(struct perf_hpp_fmt *fmt);
+bool perf_hpp__is_sym_entry(struct perf_hpp_fmt *fmt);
+
+struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt);
+
+int hist_entry__filter(struct hist_entry *he, int type, const void *arg);
static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format,
struct hists *hists)
@@ -372,6 +427,7 @@ static inline int script_browse(const char *script_opt __maybe_unused)
#endif
unsigned int hists__sort_list_width(struct hists *hists);
+unsigned int hists__overhead_width(struct hists *hists);
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
struct perf_sample *sample, bool nonany_branch_mode);
@@ -381,4 +437,26 @@ int parse_filter_percentage(const struct option *opt __maybe_unused,
const char *arg, int unset __maybe_unused);
int perf_hist_config(const char *var, const char *value);
+void perf_hpp_list__init(struct perf_hpp_list *list);
+
+enum hierarchy_move_dir {
+ HMD_NORMAL,
+ HMD_FORCE_SIBLING,
+ HMD_FORCE_CHILD,
+};
+
+struct rb_node *rb_hierarchy_last(struct rb_node *node);
+struct rb_node *__rb_hierarchy_next(struct rb_node *node,
+ enum hierarchy_move_dir hmd);
+struct rb_node *rb_hierarchy_prev(struct rb_node *node);
+
+static inline struct rb_node *rb_hierarchy_next(struct rb_node *node)
+{
+ return __rb_hierarchy_next(node, HMD_NORMAL);
+}
+
+#define HIERARCHY_INDENT 3
+
+bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit);
+
#endif /* __PERF_HIST_H */
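The perf_hpp_list type above replaces the old global perf_hpp__list/perf_hpp__sort_list pair, so output fields and sort keys can now be carried per hists instance through hpp_list. A minimal sketch of the intended flow, assuming the caller already owns a perf_hpp_fmt; my_hpp_list, my_hists and setup_private_columns are illustrative names, not part of this patch:

	static struct perf_hpp_list my_hpp_list;
	static struct hists my_hists;

	static void setup_private_columns(struct perf_hpp_fmt *my_fmt)
	{
		struct perf_hpp_fmt *fmt;

		perf_hpp_list__init(&my_hpp_list);
		perf_hpp_list__column_register(&my_hpp_list, my_fmt);
		__hists__init(&my_hists, &my_hpp_list);

		/* walk only this instance's output fields */
		perf_hpp_list__for_each_format(&my_hpp_list, fmt)
			fmt->user_len = 12;
	}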
diff --git a/tools/perf/util/jit.h b/tools/perf/util/jit.h
new file mode 100644
index 000000000000..a1e99da0715a
--- /dev/null
+++ b/tools/perf/util/jit.h
@@ -0,0 +1,15 @@
+#ifndef __JIT_H__
+#define __JIT_H__
+
+#include <data.h>
+
+extern int jit_process(struct perf_session *session,
+ struct perf_data_file *output,
+ struct machine *machine,
+ char *filename,
+ pid_t pid,
+ u64 *nbytes);
+
+extern int jit_inject_record(const char *filename);
+
+#endif /* __JIT_H__ */
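jit_process() is the whole public surface of the jitdump support: a tool calls it for every mmap it repipes, and the return value distinguishes "not a jitdump marker" (0), "records injected" (1, with *nbytes filled in) and hard errors (negative). A hedged sketch of a caller; handle_mmap and total are illustrative names:

	static int handle_mmap(struct perf_session *session,
			       struct perf_data_file *output,
			       struct machine *machine,
			       char *filename, pid_t pid, u64 *total)
	{
		u64 nbytes = 0;
		int ret = jit_process(session, output, machine,
				      filename, pid, &nbytes);

		if (ret < 0)
			return ret;	/* error while injecting */
		if (ret > 0)		/* it was a jit-<pid>.dump marker */
			*total += nbytes;
		return 0;		/* ret == 0: ordinary mmap, pass through */
	}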
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
new file mode 100644
index 000000000000..cd272cc21e05
--- /dev/null
+++ b/tools/perf/util/jitdump.c
@@ -0,0 +1,697 @@
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <byteswap.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#include "util.h"
+#include "event.h"
+#include "debug.h"
+#include "evlist.h"
+#include "symbol.h"
+#include "strlist.h"
+#include <elf.h>
+
+#include "session.h"
+#include "jit.h"
+#include "jitdump.h"
+#include "genelf.h"
+#include "../builtin.h"
+
+struct jit_buf_desc {
+ struct perf_data_file *output;
+ struct perf_session *session;
+ struct machine *machine;
+ union jr_entry *entry;
+ void *buf;
+ uint64_t sample_type;
+ size_t bufsize;
+ FILE *in;
+ bool needs_bswap; /* handles cross-endianness */
+ void *debug_data;
+ size_t nr_debug_entries;
+ uint32_t code_load_count;
+ u64 bytes_written;
+ struct rb_root code_root;
+ char dir[PATH_MAX];
+};
+
+struct debug_line_info {
+ unsigned long vma;
+ unsigned int lineno;
+ /* The filename format is unspecified: absolute path, relative, etc. */
+ char const filename[0];
+};
+
+struct jit_tool {
+ struct perf_tool tool;
+ struct perf_data_file output;
+ struct perf_data_file input;
+ u64 bytes_written;
+};
+
+#define hmax(a, b) ((a) > (b) ? (a) : (b))
+#define get_jit_tool(t) (container_of(tool, struct jit_tool, tool))
+
+static int
+jit_emit_elf(char *filename,
+ const char *sym,
+ uint64_t code_addr,
+ const void *code,
+ int csize,
+ void *debug,
+ int nr_debug_entries)
+{
+ int ret, fd;
+
+ if (verbose > 0)
+ fprintf(stderr, "write ELF image %s\n", filename);
+
+ fd = open(filename, O_CREAT|O_TRUNC|O_WRONLY, 0644);
+ if (fd == -1) {
+ pr_warning("cannot create jit ELF %s: %s\n", filename, strerror(errno));
+ return -1;
+ }
+
+ ret = jit_write_elf(fd, code_addr, sym, (const void *)code, csize, debug, nr_debug_entries);
+
+ close(fd);
+
+ if (ret)
+ unlink(filename);
+
+ return ret;
+}
+
+static void
+jit_close(struct jit_buf_desc *jd)
+{
+ if (!(jd && jd->in))
+ return;
+ funlockfile(jd->in);
+ fclose(jd->in);
+ jd->in = NULL;
+}
+
+static int
+jit_validate_events(struct perf_session *session)
+{
+ struct perf_evsel *evsel;
+
+ /*
+ * check that all events use CLOCK_MONOTONIC
+ */
+ evlist__for_each(session->evlist, evsel) {
+ if (evsel->attr.use_clockid == 0 || evsel->attr.clockid != CLOCK_MONOTONIC)
+ return -1;
+ }
+ return 0;
+}
+
+static int
+jit_open(struct jit_buf_desc *jd, const char *name)
+{
+ struct jitheader header;
+ struct jr_prefix *prefix;
+ ssize_t bs, bsz = 0;
+ void *n, *buf = NULL;
+ int ret, retval = -1;
+
+ jd->in = fopen(name, "r");
+ if (!jd->in)
+ return -1;
+
+ bsz = hmax(sizeof(header), sizeof(*prefix));
+
+ buf = malloc(bsz);
+ if (!buf)
+ goto error;
+
+ /*
+ * protect from writer modifying the file while we are reading it
+ */
+ flockfile(jd->in);
+
+ ret = fread(buf, sizeof(header), 1, jd->in);
+ if (ret != 1)
+ goto error;
+
+ memcpy(&header, buf, sizeof(header));
+
+ if (header.magic != JITHEADER_MAGIC) {
+ if (header.magic != JITHEADER_MAGIC_SW)
+ goto error;
+ jd->needs_bswap = true;
+ }
+
+ if (jd->needs_bswap) {
+ header.version = bswap_32(header.version);
+ header.total_size = bswap_32(header.total_size);
+ header.pid = bswap_32(header.pid);
+ header.elf_mach = bswap_32(header.elf_mach);
+ header.timestamp = bswap_64(header.timestamp);
+ header.flags = bswap_64(header.flags);
+ }
+
+ if (verbose > 2)
+ pr_debug("version=%u\nhdr.size=%u\nts=0x%llx\npid=%d\nelf_mach=%d\n",
+ header.version,
+ header.total_size,
+ (unsigned long long)header.timestamp,
+ header.pid,
+ header.elf_mach);
+
+ if (header.flags & JITDUMP_FLAGS_RESERVED) {
+ pr_err("jitdump file contains invalid or unsupported flags 0x%llx\n",
+ (unsigned long long)header.flags & JITDUMP_FLAGS_RESERVED);
+ goto error;
+ }
+
+ /*
+ * validate event is using the correct clockid
+ */
+ if (jit_validate_events(jd->session)) {
+ pr_err("error, jitted code must be sampled with perf record -k 1\n");
+ goto error;
+ }
+
+ bs = header.total_size - sizeof(header);
+
+ if (bs > bsz) {
+ n = realloc(buf, bs);
+ if (!n)
+ goto error;
+ buf = n;
+ /* read the extra header bytes we do not know about;
+ * bsz is updated only afterwards so the read length is right */
+ ret = fread(buf, bs - bsz, 1, jd->in);
+ if (ret != 1)
+ goto error;
+ bsz = bs;
+ }
+ /*
+ * keep dirname for generating files and mmap records
+ */
+ strcpy(jd->dir, name);
+ dirname(jd->dir);
+
+ return 0;
+error:
+ funlockfile(jd->in);
+ fclose(jd->in);
+ return retval;
+}
+
+static union jr_entry *
+jit_get_next_entry(struct jit_buf_desc *jd)
+{
+ struct jr_prefix *prefix;
+ union jr_entry *jr;
+ void *addr;
+ size_t bs, size;
+ int id, ret;
+
+ if (!(jd && jd->in))
+ return NULL;
+
+ if (jd->buf == NULL) {
+ size_t sz = getpagesize();
+ if (sz < sizeof(*prefix))
+ sz = sizeof(*prefix);
+
+ jd->buf = malloc(sz);
+ if (jd->buf == NULL)
+ return NULL;
+
+ jd->bufsize = sz;
+ }
+
+ prefix = jd->buf;
+
+ /*
+ * file is still locked at this point
+ */
+ ret = fread(prefix, sizeof(*prefix), 1, jd->in);
+ if (ret != 1)
+ return NULL;
+
+ if (jd->needs_bswap) {
+ prefix->id = bswap_32(prefix->id);
+ prefix->total_size = bswap_32(prefix->total_size);
+ prefix->timestamp = bswap_64(prefix->timestamp);
+ }
+ id = prefix->id;
+ size = prefix->total_size;
+
+ bs = (size_t)size;
+ if (bs < sizeof(*prefix))
+ return NULL;
+
+ if (id >= JIT_CODE_MAX) {
+ pr_warning("next_entry: unknown prefix %d, skipping\n", id);
+ return NULL;
+ }
+ if (bs > jd->bufsize) {
+ void *n;
+ n = realloc(jd->buf, bs);
+ if (!n)
+ return NULL;
+ jd->buf = n;
+ jd->bufsize = bs;
+ }
+
+ addr = ((void *)jd->buf) + sizeof(*prefix);
+
+ ret = fread(addr, bs - sizeof(*prefix), 1, jd->in);
+ if (ret != 1)
+ return NULL;
+
+ jr = (union jr_entry *)jd->buf;
+
+ switch (id) {
+ case JIT_CODE_DEBUG_INFO:
+ if (jd->needs_bswap) {
+ uint64_t n;
+ jr->info.code_addr = bswap_64(jr->info.code_addr);
+ jr->info.nr_entry = bswap_64(jr->info.nr_entry);
+ for (n = 0 ; n < jr->info.nr_entry; n++) {
+ jr->info.entries[n].addr = bswap_64(jr->info.entries[n].addr);
+ jr->info.entries[n].lineno = bswap_32(jr->info.entries[n].lineno);
+ jr->info.entries[n].discrim = bswap_32(jr->info.entries[n].discrim);
+ }
+ }
+ break;
+ case JIT_CODE_CLOSE:
+ break;
+ case JIT_CODE_LOAD:
+ if (jd->needs_bswap) {
+ jr->load.pid = bswap_32(jr->load.pid);
+ jr->load.tid = bswap_32(jr->load.tid);
+ jr->load.vma = bswap_64(jr->load.vma);
+ jr->load.code_addr = bswap_64(jr->load.code_addr);
+ jr->load.code_size = bswap_64(jr->load.code_size);
+ jr->load.code_index = bswap_64(jr->load.code_index);
+ }
+ jd->code_load_count++;
+ break;
+ case JIT_CODE_MOVE:
+ if (jd->needs_bswap) {
+ jr->move.pid = bswap_32(jr->move.pid);
+ jr->move.tid = bswap_32(jr->move.tid);
+ jr->move.vma = bswap_64(jr->move.vma);
+ jr->move.old_code_addr = bswap_64(jr->move.old_code_addr);
+ jr->move.new_code_addr = bswap_64(jr->move.new_code_addr);
+ jr->move.code_size = bswap_64(jr->move.code_size);
+ jr->move.code_index = bswap_64(jr->move.code_index);
+ }
+ break;
+ case JIT_CODE_MAX:
+ default:
+ return NULL;
+ }
+ return jr;
+}
+
+static int
+jit_inject_event(struct jit_buf_desc *jd, union perf_event *event)
+{
+ ssize_t size;
+
+ size = perf_data_file__write(jd->output, event, event->header.size);
+ if (size < 0)
+ return -1;
+
+ jd->bytes_written += size;
+ return 0;
+}
+
+static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
+{
+ struct perf_sample sample;
+ union perf_event *event;
+ struct perf_tool *tool = jd->session->tool;
+ uint64_t code, addr;
+ uintptr_t uaddr;
+ char *filename;
+ struct stat st;
+ size_t size;
+ u16 idr_size;
+ const char *sym;
+ uint32_t count;
+ int ret, csize;
+ pid_t pid, tid;
+ struct {
+ u32 pid, tid;
+ u64 time;
+ } *id;
+
+ pid = jr->load.pid;
+ tid = jr->load.tid;
+ csize = jr->load.code_size;
+ addr = jr->load.code_addr;
+ sym = (void *)((unsigned long)jr + sizeof(jr->load));
+ code = (unsigned long)jr + jr->load.p.total_size - csize;
+ count = jr->load.code_index;
+ idr_size = jd->machine->id_hdr_size;
+
+ event = calloc(1, sizeof(*event) + idr_size);
+ if (!event)
+ return -1;
+
+ filename = event->mmap2.filename;
+ size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%u.so",
+ jd->dir,
+ pid,
+ count);
+
+ size++; /* for \0 */
+
+ size = PERF_ALIGN(size, sizeof(u64));
+ uaddr = (uintptr_t)code;
+ ret = jit_emit_elf(filename, sym, addr, (const void *)uaddr, csize, jd->debug_data, jd->nr_debug_entries);
+
+ if (jd->debug_data && jd->nr_debug_entries) {
+ free(jd->debug_data);
+ jd->debug_data = NULL;
+ jd->nr_debug_entries = 0;
+ }
+
+ if (ret) {
+ free(event);
+ return -1;
+ }
+ if (stat(filename, &st))
+ memset(&st, 0, sizeof(st));
+
+ event->mmap2.header.type = PERF_RECORD_MMAP2;
+ event->mmap2.header.misc = PERF_RECORD_MISC_USER;
+ event->mmap2.header.size = (sizeof(event->mmap2) -
+ (sizeof(event->mmap2.filename) - size) + idr_size);
+
+ event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET;
+ event->mmap2.start = addr;
+ event->mmap2.len = csize;
+ event->mmap2.pid = pid;
+ event->mmap2.tid = tid;
+ event->mmap2.ino = st.st_ino;
+ event->mmap2.maj = major(st.st_dev);
+ event->mmap2.min = minor(st.st_dev);
+ event->mmap2.prot = st.st_mode;
+ event->mmap2.flags = MAP_SHARED;
+ event->mmap2.ino_generation = 1;
+
+ id = (void *)((unsigned long)event + event->mmap.header.size - idr_size);
+ if (jd->sample_type & PERF_SAMPLE_TID) {
+ id->pid = pid;
+ id->tid = tid;
+ }
+ if (jd->sample_type & PERF_SAMPLE_TIME)
+ id->time = jr->load.p.timestamp;
+
+ /*
+ * create pseudo sample to induce dso hit increment
+ * use first address as sample address
+ */
+ memset(&sample, 0, sizeof(sample));
+ sample.pid = pid;
+ sample.tid = tid;
+ sample.time = id->time;
+ sample.ip = addr;
+
+ ret = perf_event__process_mmap2(tool, event, &sample, jd->machine);
+ if (ret)
+ return ret;
+
+ ret = jit_inject_event(jd, event);
+ /*
+ * mark the dso as used, so a build-id is generated in the header
+ */
+ if (!ret)
+ build_id__mark_dso_hit(tool, event, &sample, NULL, jd->machine);
+
+ return ret;
+}
+
+static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
+{
+ struct perf_sample sample;
+ union perf_event *event;
+ struct perf_tool *tool = jd->session->tool;
+ char *filename;
+ size_t size;
+ struct stat st;
+ u16 idr_size;
+ int ret;
+ pid_t pid, tid;
+ struct {
+ u32 pid, tid;
+ u64 time;
+ } *id;
+
+ pid = jr->move.pid;
+ tid = jr->move.tid;
+ idr_size = jd->machine->id_hdr_size;
+
+ /*
+ * +16 to account for sample_id_all (hack)
+ */
+ event = calloc(1, sizeof(*event) + 16);
+ if (!event)
+ return -1;
+
+ filename = event->mmap2.filename;
+ size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%"PRIu64,
+ jd->dir,
+ pid,
+ jr->move.code_index);
+
+ size++; /* for \0 */
+
+ if (stat(filename, &st))
+ memset(&st, 0, sizeof(st));
+
+ size = PERF_ALIGN(size, sizeof(u64));
+
+ event->mmap2.header.type = PERF_RECORD_MMAP2;
+ event->mmap2.header.misc = PERF_RECORD_MISC_USER;
+ event->mmap2.header.size = (sizeof(event->mmap2) -
+ (sizeof(event->mmap2.filename) - size) + idr_size);
+ event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET;
+ event->mmap2.start = jr->move.new_code_addr;
+ event->mmap2.len = jr->move.code_size;
+ event->mmap2.pid = pid;
+ event->mmap2.tid = tid;
+ event->mmap2.ino = st.st_ino;
+ event->mmap2.maj = major(st.st_dev);
+ event->mmap2.min = minor(st.st_dev);
+ event->mmap2.prot = st.st_mode;
+ event->mmap2.flags = MAP_SHARED;
+ event->mmap2.ino_generation = 1;
+
+ id = (void *)((unsigned long)event + event->mmap.header.size - idr_size);
+ if (jd->sample_type & PERF_SAMPLE_TID) {
+ id->pid = pid;
+ id->tid = tid;
+ }
+ if (jd->sample_type & PERF_SAMPLE_TIME)
+ id->time = jr->load.p.timestamp;
+
+ /*
+ * create pseudo sample to induce dso hit increment
+ * use first address as sample address
+ */
+ memset(&sample, 0, sizeof(sample));
+ sample.pid = pid;
+ sample.tid = tid;
+ sample.time = id->time;
+ sample.ip = jr->move.new_code_addr;
+
+ ret = perf_event__process_mmap2(tool, event, &sample, jd->machine);
+ if (ret)
+ return ret;
+
+ ret = jit_inject_event(jd, event);
+ if (!ret)
+ build_id__mark_dso_hit(tool, event, &sample, NULL, jd->machine);
+
+ return ret;
+}
+
+static int jit_repipe_debug_info(struct jit_buf_desc *jd, union jr_entry *jr)
+{
+ void *data;
+ size_t sz;
+
+ if (!(jd && jr))
+ return -1;
+
+ sz = jr->prefix.total_size - sizeof(jr->info);
+ data = malloc(sz);
+ if (!data)
+ return -1;
+
+ memcpy(data, &jr->info.entries, sz);
+
+ jd->debug_data = data;
+
+ /*
+ * we must use nr_entry instead of size here because
+ * we cannot distinguish actual entries from padding otherwise
+ */
+ jd->nr_debug_entries = jr->info.nr_entry;
+
+ return 0;
+}
+
+static int
+jit_process_dump(struct jit_buf_desc *jd)
+{
+ union jr_entry *jr;
+ int ret = 0;
+
+ while ((jr = jit_get_next_entry(jd))) {
+ switch (jr->prefix.id) {
+ case JIT_CODE_LOAD:
+ ret = jit_repipe_code_load(jd, jr);
+ break;
+ case JIT_CODE_MOVE:
+ ret = jit_repipe_code_move(jd, jr);
+ break;
+ case JIT_CODE_DEBUG_INFO:
+ ret = jit_repipe_debug_info(jd, jr);
+ break;
+ default:
+ ret = 0;
+ continue;
+ }
+ }
+ return ret;
+}
+
+static int
+jit_inject(struct jit_buf_desc *jd, char *path)
+{
+ int ret;
+
+ if (verbose > 0)
+ fprintf(stderr, "injecting: %s\n", path);
+
+ ret = jit_open(jd, path);
+ if (ret)
+ return -1;
+
+ ret = jit_process_dump(jd);
+
+ jit_close(jd);
+
+ if (verbose > 0)
+ fprintf(stderr, "injected: %s (%d)\n", path, ret);
+
+ return 0;
+}
+
+/*
+ * File must be with pattern .../jit-XXXX.dump
+ * where XXXX is the PID of the process which did the mmap()
+ * as captured in the RECORD_MMAP record
+ */
+static int
+jit_detect(char *mmap_name, pid_t pid)
+{
+ char *p;
+ char *end = NULL;
+ pid_t pid2;
+
+ if (verbose > 2)
+ fprintf(stderr, "jit marker trying : %s\n", mmap_name);
+ /*
+ * get file name
+ */
+ p = strrchr(mmap_name, '/');
+ if (!p)
+ return -1;
+
+ /*
+ * match prefix
+ */
+ if (strncmp(p, "/jit-", 5))
+ return -1;
+
+ /*
+ * skip prefix
+ */
+ p += 5;
+
+ /*
+ * must be followed by a pid
+ */
+ if (!isdigit(*p))
+ return -1;
+
+ pid2 = (int)strtol(p, &end, 10);
+ if (!end)
+ return -1;
+
+ /*
+ * pid does not match mmap pid
+ * pid==0 in system-wide mode (synthesized)
+ */
+ if (pid && pid2 != pid)
+ return -1;
+ /*
+ * validate suffix
+ */
+ if (strcmp(end, ".dump"))
+ return -1;
+
+ if (verbose > 0)
+ fprintf(stderr, "jit marker found: %s\n", mmap_name);
+
+ return 0;
+}
+
+int
+jit_process(struct perf_session *session,
+ struct perf_data_file *output,
+ struct machine *machine,
+ char *filename,
+ pid_t pid,
+ u64 *nbytes)
+{
+ struct perf_evsel *first;
+ struct jit_buf_desc jd;
+ int ret;
+
+ /*
+ * first, detect marker mmap (i.e., the jitdump mmap)
+ */
+ if (jit_detect(filename, pid))
+ return 0;
+
+ memset(&jd, 0, sizeof(jd));
+
+ jd.session = session;
+ jd.output = output;
+ jd.machine = machine;
+
+ /*
+ * track sample_type to compute the id_all layout;
+ * perf currently sets the same sample_type for all events
+ */
+ first = perf_evlist__first(session->evlist);
+ jd.sample_type = first->attr.sample_type;
+
+ *nbytes = 0;
+
+ ret = jit_inject(&jd, filename);
+ if (!ret) {
+ *nbytes = jd.bytes_written;
+ ret = 1;
+ }
+
+ return ret;
+}
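For jit_detect() above to ever match, the producing JIT must mmap its jit-<pid>.dump file so that perf record captures a corresponding MMAP record, and sampling has to use perf record -k 1 so that jit_validate_events() sees CLOCK_MONOTONIC. A sketch of the producer side, under the assumption that an executable private mapping of the dump file is what creates the marker record (error handling trimmed; the caller checks for MAP_FAILED):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/mman.h>

	static void *open_jitdump_marker(void)
	{
		char path[64];
		long psz = sysconf(_SC_PAGESIZE);
		int fd;

		snprintf(path, sizeof(path), "/tmp/jit-%d.dump", getpid());
		fd = open(path, O_CREAT | O_TRUNC | O_RDWR, 0666);
		if (fd < 0)
			return MAP_FAILED;
		/* the executable mapping becomes the perf.data MMAP
		 * record that jit_detect() pattern-matches */
		return mmap(NULL, psz, PROT_READ | PROT_EXEC,
			    MAP_PRIVATE, fd, 0);
	}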
diff --git a/tools/perf/util/jitdump.h b/tools/perf/util/jitdump.h
new file mode 100644
index 000000000000..b66c1f503d9e
--- /dev/null
+++ b/tools/perf/util/jitdump.h
@@ -0,0 +1,124 @@
+/*
+ * jitdump.h: jitted code info encapsulation file format
+ *
+ * Adapted from OProfile GPLv2 support jidump.h:
+ * Copyright 2007 OProfile authors
+ * Jens Wilke
+ * Daniel Hansel
+ * Copyright IBM Corporation 2007
+ */
+#ifndef JITDUMP_H
+#define JITDUMP_H
+
+#include <sys/time.h>
+#include <time.h>
+#include <stdint.h>
+#include <string.h>
+
+/* JiTD */
+#define JITHEADER_MAGIC 0x4A695444
+#define JITHEADER_MAGIC_SW 0x4454694A
+
+#define PADDING_8ALIGNED(x) ((((x) + 7) & 7) ^ 7)
+
+#define JITHEADER_VERSION 1
+
+enum jitdump_flags_bits {
+ JITDUMP_FLAGS_MAX_BIT,
+};
+
+#define JITDUMP_FLAGS_RESERVED (JITDUMP_FLAGS_MAX_BIT < 64 ? \
+ (~((1ULL << JITDUMP_FLAGS_MAX_BIT) - 1)) : 0)
+
+struct jitheader {
+ uint32_t magic; /* characters "jItD" */
+ uint32_t version; /* header version */
+ uint32_t total_size; /* total size of header */
+ uint32_t elf_mach; /* elf mach target */
+ uint32_t pad1; /* reserved */
+ uint32_t pid; /* JIT process id */
+ uint64_t timestamp; /* timestamp */
+ uint64_t flags; /* flags */
+};
+
+enum jit_record_type {
+ JIT_CODE_LOAD = 0,
+ JIT_CODE_MOVE = 1,
+ JIT_CODE_DEBUG_INFO = 2,
+ JIT_CODE_CLOSE = 3,
+
+ JIT_CODE_MAX,
+};
+
+/* record prefix (mandatory in each record) */
+struct jr_prefix {
+ uint32_t id;
+ uint32_t total_size;
+ uint64_t timestamp;
+};
+
+struct jr_code_load {
+ struct jr_prefix p;
+
+ uint32_t pid;
+ uint32_t tid;
+ uint64_t vma;
+ uint64_t code_addr;
+ uint64_t code_size;
+ uint64_t code_index;
+};
+
+struct jr_code_close {
+ struct jr_prefix p;
+};
+
+struct jr_code_move {
+ struct jr_prefix p;
+
+ uint32_t pid;
+ uint32_t tid;
+ uint64_t vma;
+ uint64_t old_code_addr;
+ uint64_t new_code_addr;
+ uint64_t code_size;
+ uint64_t code_index;
+};
+
+struct debug_entry {
+ uint64_t addr;
+ int lineno; /* source line number starting at 1 */
+ int discrim; /* column discriminator, 0 is default */
+ const char name[0]; /* null terminated filename, \xff\0 if same as previous entry */
+};
+
+struct jr_code_debug_info {
+ struct jr_prefix p;
+
+ uint64_t code_addr;
+ uint64_t nr_entry;
+ struct debug_entry entries[0];
+};
+
+union jr_entry {
+ struct jr_code_debug_info info;
+ struct jr_code_close close;
+ struct jr_code_load load;
+ struct jr_code_move move;
+ struct jr_prefix prefix;
+};
+
+static inline struct debug_entry *
+debug_entry_next(struct debug_entry *ent)
+{
+ void *a = ent + 1;
+ size_t l = strlen(ent->name) + 1;
+ return a + l;
+}
+
+static inline char *
+debug_entry_file(struct debug_entry *ent)
+{
+ void *a = ent + 1;
+ return a;
+}
+
+#endif /* !JITDUMP_H */
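A sketch of a writer for this format, assuming jitdump.h is included, emitting the file header followed by one JIT_CODE_LOAD record. Per the reader in jitdump.c, the record body is the fixed struct, then the NUL-terminated symbol name, then the code bytes, with total_size covering all three; EM_X86_64 and the helper name are assumptions for the example:

	#include <elf.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int emit_load(FILE *f, uint64_t addr, const char *name,
			     const void *code, uint64_t csize)
	{
		struct timespec ts;
		struct jitheader hdr = {
			.magic      = JITHEADER_MAGIC,
			.version    = JITHEADER_VERSION,
			.total_size = sizeof(hdr),
			.elf_mach   = EM_X86_64,	/* assumption: x86_64 JIT */
			.pid        = getpid(),
		};
		struct jr_code_load load;

		clock_gettime(CLOCK_MONOTONIC, &ts);	/* match perf record -k 1 */
		hdr.timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec;

		memset(&load, 0, sizeof(load));
		load.p.id         = JIT_CODE_LOAD;
		load.p.total_size = sizeof(load) + strlen(name) + 1 + csize;
		load.p.timestamp  = hdr.timestamp;
		load.pid          = getpid();
		load.tid          = getpid();
		load.vma          = addr;
		load.code_addr    = addr;
		load.code_size    = csize;

		if (fwrite(&hdr, sizeof(hdr), 1, f) != 1 ||
		    fwrite(&load, sizeof(load), 1, f) != 1 ||
		    fwrite(name, strlen(name) + 1, 1, f) != 1 ||
		    fwrite(code, csize, 1, f) != 1)
			return -1;
		return 0;
	}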
diff --git a/tools/perf/util/kvm-stat.h b/tools/perf/util/kvm-stat.h
index ae825d4ec110..d01e73592f6e 100644
--- a/tools/perf/util/kvm-stat.h
+++ b/tools/perf/util/kvm-stat.h
@@ -122,6 +122,7 @@ void exit_event_decode_key(struct perf_kvm_stat *kvm,
bool kvm_exit_event(struct perf_evsel *evsel);
bool kvm_entry_event(struct perf_evsel *evsel);
+int setup_kvm_events_tp(struct perf_kvm_stat *kvm);
#define define_exit_reasons_table(name, symbols) \
static struct exit_reasons_table name[] = { \
@@ -133,8 +134,13 @@ bool kvm_entry_event(struct perf_evsel *evsel);
*/
int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid);
-extern const char * const kvm_events_tp[];
+extern const char *kvm_events_tp[];
extern struct kvm_reg_events_ops kvm_reg_events_ops[];
extern const char * const kvm_skip_events[];
+extern const char *vcpu_id_str;
+extern const int decode_str_len;
+extern const char *kvm_exit_reason;
+extern const char *kvm_entry_trace;
+extern const char *kvm_exit_trace;
#endif /* __PERF_KVM_STAT_H */
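The variables declared extern above are expected to come from each arch's kvm-stat code, so the generic code can format vcpu fields and decode strings without hard-coded x86 assumptions, and setup_kvm_events_tp() lets an arch pick its tracepoints at runtime. A sketch of what an arch might provide; the values shown mirror the traditional x86 ones and are illustrative:

	const char *vcpu_id_str = "vcpu_id";
	const int decode_str_len = 20;
	const char *kvm_entry_trace = "kvm:kvm_entry";
	const char *kvm_exit_trace = "kvm:kvm_exit";
	const char *kvm_exit_reason = "exit_reason";

	const char *kvm_events_tp[] = {
		"kvm:kvm_entry",
		"kvm:kvm_exit",
		NULL,
	};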
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 2c2b443df5ba..1a3e45baf97f 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -180,6 +180,16 @@ struct symbol *machine__find_kernel_symbol(struct machine *machine,
}
static inline
+struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
+ enum map_type type, const char *name,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ return map_groups__find_symbol_by_name(&machine->kmaps, type, name,
+ mapp, filter);
+}
+
+static inline
struct symbol *machine__find_kernel_function(struct machine *machine, u64 addr,
struct map **mapp,
symbol_filter_t filter)
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
new file mode 100644
index 000000000000..75465f89a413
--- /dev/null
+++ b/tools/perf/util/mem-events.c
@@ -0,0 +1,255 @@
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <api/fs/fs.h>
+#include "mem-events.h"
+#include "debug.h"
+#include "symbol.h"
+
+#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }
+
+struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
+ E("ldlat-loads", "cpu/mem-loads,ldlat=30/P", "mem-loads"),
+ E("ldlat-stores", "cpu/mem-stores/P", "mem-stores"),
+};
+#undef E
+
+char *perf_mem_events__name(int i)
+{
+ return (char *)perf_mem_events[i].name;
+}
+
+int perf_mem_events__parse(const char *str)
+{
+ char *tok, *saveptr = NULL;
+ bool found = false;
+ char *buf;
+ int j;
+
+ /* We need a buffer we know we can write to. */
+ buf = malloc(strlen(str) + 1);
+ if (!buf)
+ return -ENOMEM;
+
+ strcpy(buf, str);
+
+ tok = strtok_r((char *)buf, ",", &saveptr);
+
+ while (tok) {
+ for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
+ struct perf_mem_event *e = &perf_mem_events[j];
+
+ if (strstr(e->tag, tok))
+ e->record = found = true;
+ }
+
+ tok = strtok_r(NULL, ",", &saveptr);
+ }
+
+ free(buf);
+
+ if (found)
+ return 0;
+
+ pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
+ return -1;
+}
+
+int perf_mem_events__init(void)
+{
+ const char *mnt = sysfs__mount();
+ bool found = false;
+ int j;
+
+ if (!mnt)
+ return -ENOENT;
+
+ for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
+ char path[PATH_MAX];
+ struct perf_mem_event *e = &perf_mem_events[j];
+ struct stat st;
+
+ scnprintf(path, PATH_MAX, "%s/devices/cpu/events/%s",
+ mnt, e->sysfs_name);
+
+ if (!stat(path, &st))
+ e->supported = found = true;
+ }
+
+ return found ? 0 : -ENOENT;
+}
+
+static const char * const tlb_access[] = {
+ "N/A",
+ "HIT",
+ "MISS",
+ "L1",
+ "L2",
+ "Walker",
+ "Fault",
+};
+
+int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+{
+ size_t l = 0, i;
+ u64 m = PERF_MEM_TLB_NA;
+ u64 hit, miss;
+
+ sz -= 1; /* -1 for null termination */
+ out[0] = '\0';
+
+ if (mem_info)
+ m = mem_info->data_src.mem_dtlb;
+
+ hit = m & PERF_MEM_TLB_HIT;
+ miss = m & PERF_MEM_TLB_MISS;
+
+ /* already taken care of */
+ m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
+
+ for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
+ if (!(m & 0x1))
+ continue;
+ if (l) {
+ strcat(out, " or ");
+ l += 4;
+ }
+ l += scnprintf(out + l, sz - l, "%s", tlb_access[i]);
+ }
+ if (*out == '\0')
+ l += scnprintf(out, sz - l, "N/A");
+ if (hit)
+ l += scnprintf(out + l, sz - l, " hit");
+ if (miss)
+ l += scnprintf(out + l, sz - l, " miss");
+
+ return l;
+}
+
+static const char * const mem_lvl[] = {
+ "N/A",
+ "HIT",
+ "MISS",
+ "L1",
+ "LFB",
+ "L2",
+ "L3",
+ "Local RAM",
+ "Remote RAM (1 hop)",
+ "Remote RAM (2 hops)",
+ "Remote Cache (1 hop)",
+ "Remote Cache (2 hops)",
+ "I/O",
+ "Uncached",
+};
+
+int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+{
+ size_t i, l = 0;
+ u64 m = PERF_MEM_LVL_NA;
+ u64 hit, miss;
+
+ if (mem_info)
+ m = mem_info->data_src.mem_lvl;
+
+ sz -= 1; /* -1 for null termination */
+ out[0] = '\0';
+
+ hit = m & PERF_MEM_LVL_HIT;
+ miss = m & PERF_MEM_LVL_MISS;
+
+ /* already taken care of */
+ m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
+
+ for (i = 0; m && i < ARRAY_SIZE(mem_lvl); i++, m >>= 1) {
+ if (!(m & 0x1))
+ continue;
+ if (l) {
+ strcat(out, " or ");
+ l += 4;
+ }
+ l += scnprintf(out + l, sz - l, "%s", mem_lvl[i]);
+ }
+ if (*out == '\0')
+ l += scnprintf(out, sz - l, "N/A");
+ if (hit)
+ l += scnprintf(out + l, sz - l, " hit");
+ if (miss)
+ l += scnprintf(out + l, sz - l, " miss");
+
+ return l;
+}
+
+static const char * const snoop_access[] = {
+ "N/A",
+ "None",
+ "Miss",
+ "Hit",
+ "HitM",
+};
+
+int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+{
+ size_t i, l = 0;
+ u64 m = PERF_MEM_SNOOP_NA;
+
+ sz -= 1; /* -1 for null termination */
+ out[0] = '\0';
+
+ if (mem_info)
+ m = mem_info->data_src.mem_snoop;
+
+ for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
+ if (!(m & 0x1))
+ continue;
+ if (l) {
+ strcat(out, " or ");
+ l += 4;
+ }
+ l += scnprintf(out + l, sz - l, "%s", snoop_access[i]);
+ }
+
+ if (*out == '\0')
+ l += scnprintf(out, sz - l, "N/A");
+
+ return l;
+}
+
+int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+{
+ u64 mask = PERF_MEM_LOCK_NA;
+ int l;
+
+ if (mem_info)
+ mask = mem_info->data_src.mem_lock;
+
+ if (mask & PERF_MEM_LOCK_NA)
+ l = scnprintf(out, sz, "N/A");
+ else if (mask & PERF_MEM_LOCK_LOCKED)
+ l = scnprintf(out, sz, "Yes");
+ else
+ l = scnprintf(out, sz, "No");
+
+ return l;
+}
+
+int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+{
+ int i = 0;
+
+ i += perf_mem__lvl_scnprintf(out, sz, mem_info);
+ i += scnprintf(out + i, sz - i, "|SNP ");
+ i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
+ i += scnprintf(out + i, sz - i, "|TLB ");
+ i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
+ i += scnprintf(out + i, sz - i, "|LCK ");
+ i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
+
+ return i;
+}
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
new file mode 100644
index 000000000000..5d6d93066a6e
--- /dev/null
+++ b/tools/perf/util/mem-events.h
@@ -0,0 +1,35 @@
+#ifndef __PERF_MEM_EVENTS_H
+#define __PERF_MEM_EVENTS_H
+
+#include <stdbool.h>
+
+struct perf_mem_event {
+ bool record;
+ bool supported;
+ const char *tag;
+ const char *name;
+ const char *sysfs_name;
+};
+
+enum {
+ PERF_MEM_EVENTS__LOAD,
+ PERF_MEM_EVENTS__STORE,
+ PERF_MEM_EVENTS__MAX,
+};
+
+extern struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX];
+
+int perf_mem_events__parse(const char *str);
+int perf_mem_events__init(void);
+
+char *perf_mem_events__name(int i);
+
+struct mem_info;
+int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
+int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
+int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
+int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
+
+int perf_script__meminfo_scnprintf(char *bf, size_t size, struct mem_info *mem_info);
+
+#endif /* __PERF_MEM_EVENTS_H */
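The expected call sequence is: probe sysfs once via perf_mem_events__init(), then let a user-supplied string mark events for recording via perf_mem_events__parse(). A sketch, assuming mem-events.h is included; setup_mem_events is an illustrative name:

	#include <stdio.h>

	static int setup_mem_events(void)
	{
		int i;

		if (perf_mem_events__init())
			return -1;	/* no mem-loads/mem-stores in sysfs */

		if (perf_mem_events__parse("ldlat-loads,ldlat-stores"))
			return -1;

		for (i = 0; i < PERF_MEM_EVENTS__MAX; i++) {
			struct perf_mem_event *e = &perf_mem_events[i];

			if (e->record && e->supported)
				printf("recording %s\n", perf_mem_events__name(i));
		}
		return 0;
	}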
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 813d9b272c81..4c19d5e79d8c 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -279,7 +279,24 @@ const char *event_type(int type)
return "unknown";
}
+static int parse_events__is_name_term(struct parse_events_term *term)
+{
+ return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
+}
+static char *get_config_name(struct list_head *head_terms)
+{
+ struct parse_events_term *term;
+
+ if (!head_terms)
+ return NULL;
+
+ list_for_each_entry(term, head_terms, list)
+ if (parse_events__is_name_term(term))
+ return term->val.str;
+
+ return NULL;
+}
static struct perf_evsel *
__add_event(struct list_head *list, int *idx,
@@ -333,11 +350,25 @@ static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES]
return -1;
}
+typedef int config_term_func_t(struct perf_event_attr *attr,
+ struct parse_events_term *term,
+ struct parse_events_error *err);
+static int config_term_common(struct perf_event_attr *attr,
+ struct parse_events_term *term,
+ struct parse_events_error *err);
+static int config_attr(struct perf_event_attr *attr,
+ struct list_head *head,
+ struct parse_events_error *err,
+ config_term_func_t config_term);
+
int parse_events_add_cache(struct list_head *list, int *idx,
- char *type, char *op_result1, char *op_result2)
+ char *type, char *op_result1, char *op_result2,
+ struct parse_events_error *err,
+ struct list_head *head_config)
{
struct perf_event_attr attr;
- char name[MAX_NAME_LEN];
+ LIST_HEAD(config_terms);
+ char name[MAX_NAME_LEN], *config_name;
int cache_type = -1, cache_op = -1, cache_result = -1;
char *op_result[2] = { op_result1, op_result2 };
int i, n;
@@ -351,6 +382,7 @@ int parse_events_add_cache(struct list_head *list, int *idx,
if (cache_type == -1)
return -EINVAL;
+ config_name = get_config_name(head_config);
n = snprintf(name, MAX_NAME_LEN, "%s", type);
for (i = 0; (i < 2) && (op_result[i]); i++) {
@@ -391,7 +423,16 @@ int parse_events_add_cache(struct list_head *list, int *idx,
memset(&attr, 0, sizeof(attr));
attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
attr.type = PERF_TYPE_HW_CACHE;
- return add_event(list, idx, &attr, name, NULL);
+
+ if (head_config) {
+ if (config_attr(&attr, head_config, err,
+ config_term_common))
+ return -EINVAL;
+
+ if (get_config_terms(head_config, &config_terms))
+ return -ENOMEM;
+ }
+ return add_event(list, idx, &attr, config_name ? : name, &config_terms);
}
static void tracepoint_error(struct parse_events_error *e, int err,
@@ -540,6 +581,7 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
struct __add_bpf_event_param {
struct parse_events_evlist *data;
struct list_head *list;
+ struct list_head *head_config;
};
static int add_bpf_event(struct probe_trace_event *tev, int fd,
@@ -556,7 +598,8 @@ static int add_bpf_event(struct probe_trace_event *tev, int fd,
tev->group, tev->event, fd);
err = parse_events_add_tracepoint(&new_evsels, &evlist->idx, tev->group,
- tev->event, evlist->error, NULL);
+ tev->event, evlist->error,
+ param->head_config);
if (err) {
struct perf_evsel *evsel, *tmp;
@@ -581,11 +624,12 @@ static int add_bpf_event(struct probe_trace_event *tev, int fd,
int parse_events_load_bpf_obj(struct parse_events_evlist *data,
struct list_head *list,
- struct bpf_object *obj)
+ struct bpf_object *obj,
+ struct list_head *head_config)
{
int err;
char errbuf[BUFSIZ];
- struct __add_bpf_event_param param = {data, list};
+ struct __add_bpf_event_param param = {data, list, head_config};
static bool registered_unprobe_atexit = false;
if (IS_ERR(obj) || !obj) {
@@ -631,17 +675,99 @@ errout:
return err;
}
+static int
+parse_events_config_bpf(struct parse_events_evlist *data,
+ struct bpf_object *obj,
+ struct list_head *head_config)
+{
+ struct parse_events_term *term;
+ int error_pos;
+
+ if (!head_config || list_empty(head_config))
+ return 0;
+
+ list_for_each_entry(term, head_config, list) {
+ char errbuf[BUFSIZ];
+ int err;
+
+ if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) {
+ snprintf(errbuf, sizeof(errbuf),
+ "Invalid config term for BPF object");
+ errbuf[BUFSIZ - 1] = '\0';
+
+ data->error->idx = term->err_term;
+ data->error->str = strdup(errbuf);
+ return -EINVAL;
+ }
+
+ err = bpf__config_obj(obj, term, data->evlist, &error_pos);
+ if (err) {
+ bpf__strerror_config_obj(obj, term, data->evlist,
+ &error_pos, err, errbuf,
+ sizeof(errbuf));
+ data->error->help = strdup(
+"Hint:\tValid config terms:\n"
+" \tmap:[<arraymap>].value<indices>=[value]\n"
+" \tmap:[<eventmap>].event<indices>=[event]\n"
+"\n"
+" \twhere <indices> is something like [0,3...5] or [all]\n"
+" \t(add -v to see detail)");
+ data->error->str = strdup(errbuf);
+ if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE)
+ data->error->idx = term->err_val;
+ else
+ data->error->idx = term->err_term + error_pos;
+ return err;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Split config terms:
+ * perf record -e bpf.c/call-graph=fp,map:array.value[0]=1/ ...
+ * 'call-graph=fp' is 'evt config', which should be applied to each
+ * event in bpf.c.
+ * 'map:array.value[0]=1' is 'obj config', should be processed
+ * with parse_events_config_bpf.
+ *
+ * Move object config terms from the first list to obj_head_config.
+ */
+static void
+split_bpf_config_terms(struct list_head *evt_head_config,
+ struct list_head *obj_head_config)
+{
+ struct parse_events_term *term, *temp;
+
+ /*
+ * Currently, all possible user config terms
+ * belong to the bpf object. parse_events__is_hardcoded_term()
+ * happens to be a good flag.
+ *
+ * See parse_events_config_bpf() and
+ * config_term_tracepoint().
+ */
+ list_for_each_entry_safe(term, temp, evt_head_config, list)
+ if (!parse_events__is_hardcoded_term(term))
+ list_move_tail(&term->list, obj_head_config);
+}
+
int parse_events_load_bpf(struct parse_events_evlist *data,
struct list_head *list,
char *bpf_file_name,
- bool source)
+ bool source,
+ struct list_head *head_config)
{
+ int err;
struct bpf_object *obj;
+ LIST_HEAD(obj_head_config);
+
+ if (head_config)
+ split_bpf_config_terms(head_config, &obj_head_config);
obj = bpf__prepare_load(bpf_file_name, source);
if (IS_ERR(obj)) {
char errbuf[BUFSIZ];
- int err;
err = PTR_ERR(obj);
@@ -659,7 +785,18 @@ int parse_events_load_bpf(struct parse_events_evlist *data,
return err;
}
- return parse_events_load_bpf_obj(data, list, obj);
+ err = parse_events_load_bpf_obj(data, list, obj, head_config);
+ if (err)
+ return err;
+ err = parse_events_config_bpf(data, obj, &obj_head_config);
+
+ /*
+ * Caller doesn't know anything about obj_head_config,
+ * so combine them together again before returning.
+ */
+ if (head_config)
+ list_splice_tail(&obj_head_config, head_config);
+ return err;
}
static int
@@ -746,9 +883,59 @@ static int check_type_val(struct parse_events_term *term,
return -EINVAL;
}
-typedef int config_term_func_t(struct perf_event_attr *attr,
- struct parse_events_term *term,
- struct parse_events_error *err);
+/*
+ * Update according to parse-events.l
+ */
+static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
+ [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>",
+ [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config",
+ [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1",
+ [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2",
+ [PARSE_EVENTS__TERM_TYPE_NAME] = "name",
+ [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period",
+ [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq",
+ [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type",
+ [PARSE_EVENTS__TERM_TYPE_TIME] = "time",
+ [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph",
+ [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size",
+ [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit",
+ [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit",
+};
+
+static bool config_term_shrinked;
+
+static bool
+config_term_avail(int term_type, struct parse_events_error *err)
+{
+ if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
+ err->str = strdup("Invalid term_type");
+ return false;
+ }
+ if (!config_term_shrinked)
+ return true;
+
+ switch (term_type) {
+ case PARSE_EVENTS__TERM_TYPE_CONFIG:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+ case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+ case PARSE_EVENTS__TERM_TYPE_NAME:
+ return true;
+ default:
+ if (!err)
+ return false;
+
+ /* term_type is validated so indexing is safe */
+ if (asprintf(&err->str, "'%s' is not usable in 'perf stat'",
+ config_term_names[term_type]) < 0)
+ err->str = NULL;
+ return false;
+ }
+}
+
+void parse_events__shrink_config_terms(void)
+{
+ config_term_shrinked = true;
+}
static int config_term_common(struct perf_event_attr *attr,
struct parse_events_term *term,
@@ -815,6 +1002,17 @@ do { \
return -EINVAL;
}
+ /*
+ * Check term availability after the basic checks so
+ * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
+ *
+ * If we checked availability at the entry of this function,
+ * the user would see "'<sysfs term>' is not usable in 'perf stat'"
+ * if an invalid config term is provided for legacy events
+ * (for example, instructions/badterm/...), which is confusing.
+ */
+ if (!config_term_avail(term->type_term, err))
+ return -EINVAL;
return 0;
#undef CHECK_TYPE_VAL
}
@@ -961,23 +1159,8 @@ int parse_events_add_numeric(struct parse_events_evlist *data,
return -ENOMEM;
}
- return add_event(list, &data->idx, &attr, NULL, &config_terms);
-}
-
-static int parse_events__is_name_term(struct parse_events_term *term)
-{
- return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
-}
-
-static char *pmu_event_name(struct list_head *head_terms)
-{
- struct parse_events_term *term;
-
- list_for_each_entry(term, head_terms, list)
- if (parse_events__is_name_term(term))
- return term->val.str;
-
- return NULL;
+ return add_event(list, &data->idx, &attr,
+ get_config_name(head_config), &config_terms);
}
int parse_events_add_pmu(struct parse_events_evlist *data,
@@ -1024,7 +1207,7 @@ int parse_events_add_pmu(struct parse_events_evlist *data,
return -EINVAL;
evsel = __add_event(list, &data->idx, &attr,
- pmu_event_name(head_config), pmu->cpus,
+ get_config_name(head_config), pmu->cpus,
&config_terms);
if (evsel) {
evsel->unit = info.unit;
@@ -1386,8 +1569,7 @@ int parse_events_terms(struct list_head *terms, const char *str)
return 0;
}
- if (data.terms)
- parse_events__free_terms(data.terms);
+ parse_events_terms__delete(data.terms);
return ret;
}
@@ -1395,9 +1577,10 @@ int parse_events(struct perf_evlist *evlist, const char *str,
struct parse_events_error *err)
{
struct parse_events_evlist data = {
- .list = LIST_HEAD_INIT(data.list),
- .idx = evlist->nr_entries,
- .error = err,
+ .list = LIST_HEAD_INIT(data.list),
+ .idx = evlist->nr_entries,
+ .error = err,
+ .evlist = evlist,
};
int ret;
@@ -2068,12 +2251,29 @@ int parse_events_term__clone(struct parse_events_term **new,
term->err_term, term->err_val);
}
-void parse_events__free_terms(struct list_head *terms)
+void parse_events_terms__purge(struct list_head *terms)
{
struct parse_events_term *term, *h;
- list_for_each_entry_safe(term, h, terms, list)
+ list_for_each_entry_safe(term, h, terms, list) {
+ if (term->array.nr_ranges)
+ free(term->array.ranges);
+ list_del_init(&term->list);
free(term);
+ }
+}
+
+void parse_events_terms__delete(struct list_head *terms)
+{
+ if (!terms)
+ return;
+ parse_events_terms__purge(terms);
+ free(terms);
+}
+
+void parse_events__clear_array(struct parse_events_array *a)
+{
+ free(a->ranges);
}
void parse_events_evlist_error(struct parse_events_evlist *data,
@@ -2088,6 +2288,33 @@ void parse_events_evlist_error(struct parse_events_evlist *data,
WARN_ONCE(!err->str, "WARNING: failed to allocate error string");
}
+static void config_terms_list(char *buf, size_t buf_sz)
+{
+ int i;
+ bool first = true;
+
+ buf[0] = '\0';
+ for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
+ const char *name = config_term_names[i];
+
+ if (!config_term_avail(i, NULL))
+ continue;
+ if (!name)
+ continue;
+ if (name[0] == '<')
+ continue;
+
+ if (strlen(buf) + strlen(name) + 2 >= buf_sz)
+ return;
+
+ if (!first)
+ strcat(buf, ",");
+ else
+ first = false;
+ strcat(buf, name);
+ }
+}
+
/*
* Return string contains valid config terms of an event.
* @additional_terms: For terms such as PMU sysfs terms.
@@ -2095,17 +2322,18 @@ void parse_events_evlist_error(struct parse_events_evlist *data,
char *parse_events_formats_error_string(char *additional_terms)
{
char *str;
- static const char *static_terms = "config,config1,config2,name,"
- "period,freq,branch_type,time,"
- "call-graph,stack-size\n";
+ /* "branch_type" is the longest name */
+ char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
+ (sizeof("branch_type") - 1)];
+ config_terms_list(static_terms, sizeof(static_terms));
/* valid terms */
if (additional_terms) {
- if (!asprintf(&str, "valid terms: %s,%s",
- additional_terms, static_terms))
+ if (asprintf(&str, "valid terms: %s,%s",
+ additional_terms, static_terms) < 0)
goto fail;
} else {
- if (!asprintf(&str, "valid terms: %s", static_terms))
+ if (asprintf(&str, "valid terms: %s", static_terms) < 0)
goto fail;
}
return str;
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index f1a6db107241..67e493088e81 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -68,11 +68,21 @@ enum {
PARSE_EVENTS__TERM_TYPE_CALLGRAPH,
PARSE_EVENTS__TERM_TYPE_STACKSIZE,
PARSE_EVENTS__TERM_TYPE_NOINHERIT,
- PARSE_EVENTS__TERM_TYPE_INHERIT
+ PARSE_EVENTS__TERM_TYPE_INHERIT,
+ __PARSE_EVENTS__TERM_TYPE_NR,
+};
+
+struct parse_events_array {
+ size_t nr_ranges;
+ struct {
+ unsigned int start;
+ size_t length;
+ } *ranges;
};
struct parse_events_term {
char *config;
+ struct parse_events_array array;
union {
char *str;
u64 num;
@@ -98,12 +108,14 @@ struct parse_events_evlist {
int idx;
int nr_groups;
struct parse_events_error *error;
+ struct perf_evlist *evlist;
};
struct parse_events_terms {
struct list_head *terms;
};
+void parse_events__shrink_config_terms(void);
int parse_events__is_hardcoded_term(struct parse_events_term *term);
int parse_events_term__num(struct parse_events_term **term,
int type_term, char *config, u64 num,
@@ -115,7 +127,9 @@ int parse_events_term__sym_hw(struct parse_events_term **term,
char *config, unsigned idx);
int parse_events_term__clone(struct parse_events_term **new,
struct parse_events_term *term);
-void parse_events__free_terms(struct list_head *terms);
+void parse_events_terms__delete(struct list_head *terms);
+void parse_events_terms__purge(struct list_head *terms);
+void parse_events__clear_array(struct parse_events_array *a);
int parse_events__modifier_event(struct list_head *list, char *str, bool add);
int parse_events__modifier_group(struct list_head *list, char *event_mod);
int parse_events_name(struct list_head *list, char *name);
@@ -126,18 +140,22 @@ int parse_events_add_tracepoint(struct list_head *list, int *idx,
int parse_events_load_bpf(struct parse_events_evlist *data,
struct list_head *list,
char *bpf_file_name,
- bool source);
+ bool source,
+ struct list_head *head_config);
/* Provide this function for perf test */
struct bpf_object;
int parse_events_load_bpf_obj(struct parse_events_evlist *data,
struct list_head *list,
- struct bpf_object *obj);
+ struct bpf_object *obj,
+ struct list_head *head_config);
int parse_events_add_numeric(struct parse_events_evlist *data,
struct list_head *list,
u32 type, u64 config,
struct list_head *head_config);
int parse_events_add_cache(struct list_head *list, int *idx,
- char *type, char *op_result1, char *op_result2);
+ char *type, char *op_result1, char *op_result2,
+ struct parse_events_error *error,
+ struct list_head *head_config);
int parse_events_add_breakpoint(struct list_head *list, int *idx,
void *ptr, char *type, u64 len);
int parse_events_add_pmu(struct parse_events_evlist *data,
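parse_events_array encodes an index list such as "[0,3...5]" as (start, length) ranges, {0,1} and {3,3} for that example, while "[all]" is represented by nr_ranges == 0. A small sketch that expands such an array back into plain indices; print_indices is an illustrative name:

	#include <stdio.h>

	static void print_indices(struct parse_events_array *a)
	{
		size_t r;
		unsigned int i;

		if (!a->nr_ranges) {		/* "[all]" */
			printf("all\n");
			return;
		}
		for (r = 0; r < a->nr_ranges; r++)
			for (i = 0; i < a->ranges[r].length; i++)
				printf("%u\n", a->ranges[r].start + i);
	}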
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 58c5831ffd5c..1477fbc78993 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -9,8 +9,8 @@
%{
#include <errno.h>
#include "../perf.h"
-#include "parse-events-bison.h"
#include "parse-events.h"
+#include "parse-events-bison.h"
char *parse_events_get_text(yyscan_t yyscanner);
YYSTYPE *parse_events_get_lval(yyscan_t yyscanner);
@@ -111,6 +111,7 @@ do { \
%x mem
%s config
%x event
+%x array
group [^,{}/]*[{][^}]*[}][^,{}/]*
event_pmu [^,{}/]+[/][^/]*[/][^,{}/]*
@@ -122,7 +123,7 @@ num_dec [0-9]+
num_hex 0x[a-fA-F0-9]+
num_raw_hex [a-fA-F0-9]+
name [a-zA-Z_*?][a-zA-Z0-9_*?.]*
-name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.]*
+name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]*
/* If you add a modifier you need to update check_modifier() */
modifier_event [ukhpPGHSDI]+
modifier_bp [rwx]{1,3}
@@ -176,10 +177,17 @@ modifier_bp [rwx]{1,3}
}
+<array>{
+"]" { BEGIN(config); return ']'; }
+{num_dec} { return value(yyscanner, 10); }
+{num_hex} { return value(yyscanner, 16); }
+, { return ','; }
+"\.\.\." { return PE_ARRAY_RANGE; }
+}
+
<config>{
/*
- * Please update parse_events_formats_error_string any time
- * new static term is added.
+ * Please update config_term_names when new static term is added.
*/
config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); }
@@ -196,6 +204,8 @@ no-inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOINHERIT); }
, { return ','; }
"/" { BEGIN(INITIAL); return '/'; }
{name_minus} { return str(yyscanner, PE_NAME); }
+\[all\] { return PE_ARRAY_ALL; }
+"[" { BEGIN(array); return '['; }
}
<mem>{
@@ -238,6 +248,7 @@ cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COU
alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
+bpf-output { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUTPUT); }
/*
* We have to handle the kernel PMU event cycles-ct/cycles-t/mem-loads/mem-stores separately.
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index ad379968d4c1..5be4a5f216d6 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -28,7 +28,7 @@ do { \
INIT_LIST_HEAD(list); \
} while (0)
-static inc_group_count(struct list_head *list,
+static void inc_group_count(struct list_head *list,
struct parse_events_evlist *data)
{
/* Count only groups which have more than 1 member */
@@ -48,6 +48,7 @@ static inc_group_count(struct list_head *list,
%token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP
%token PE_ERROR
%token PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT
+%token PE_ARRAY_ALL PE_ARRAY_RANGE
%type <num> PE_VALUE
%type <num> PE_VALUE_SYM_HW
%type <num> PE_VALUE_SYM_SW
@@ -64,6 +65,7 @@ static inc_group_count(struct list_head *list,
%type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT
%type <num> value_sym
%type <head> event_config
+%type <head> opt_event_config
%type <term> event_term
%type <head> event_pmu
%type <head> event_legacy_symbol
@@ -82,6 +84,9 @@ static inc_group_count(struct list_head *list,
%type <head> group_def
%type <head> group
%type <head> groups
+%type <array> array
+%type <array> array_term
+%type <array> array_terms
%union
{
@@ -93,6 +98,7 @@ static inc_group_count(struct list_head *list,
char *sys;
char *event;
} tracepoint_name;
+ struct parse_events_array array;
}
%%
@@ -211,24 +217,14 @@ event_def: event_pmu |
event_bpf_file
event_pmu:
-PE_NAME '/' event_config '/'
+PE_NAME opt_event_config
{
struct parse_events_evlist *data = _data;
struct list_head *list;
ALLOC_LIST(list);
- ABORT_ON(parse_events_add_pmu(data, list, $1, $3));
- parse_events__free_terms($3);
- $$ = list;
-}
-|
-PE_NAME '/' '/'
-{
- struct parse_events_evlist *data = _data;
- struct list_head *list;
-
- ALLOC_LIST(list);
- ABORT_ON(parse_events_add_pmu(data, list, $1, NULL));
+ ABORT_ON(parse_events_add_pmu(data, list, $1, $2));
+ parse_events_terms__delete($2);
$$ = list;
}
|
@@ -246,7 +242,7 @@ PE_KERNEL_PMU_EVENT sep_dc
ALLOC_LIST(list);
ABORT_ON(parse_events_add_pmu(data, list, "cpu", head));
- parse_events__free_terms(head);
+ parse_events_terms__delete(head);
$$ = list;
}
|
@@ -266,7 +262,7 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc
ALLOC_LIST(list);
ABORT_ON(parse_events_add_pmu(data, list, "cpu", head));
- parse_events__free_terms(head);
+ parse_events_terms__delete(head);
$$ = list;
}
@@ -285,7 +281,7 @@ value_sym '/' event_config '/'
ALLOC_LIST(list);
ABORT_ON(parse_events_add_numeric(data, list, type, config, $3));
- parse_events__free_terms($3);
+ parse_events_terms__delete($3);
$$ = list;
}
|
@@ -302,33 +298,39 @@ value_sym sep_slash_dc
}
event_legacy_cache:
-PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
+PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT opt_event_config
{
struct parse_events_evlist *data = _data;
+ struct parse_events_error *error = data->error;
struct list_head *list;
ALLOC_LIST(list);
- ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, $5));
+ ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, $5, error, $6));
+ parse_events_terms__delete($6);
$$ = list;
}
|
-PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
+PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT opt_event_config
{
struct parse_events_evlist *data = _data;
+ struct parse_events_error *error = data->error;
struct list_head *list;
ALLOC_LIST(list);
- ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, NULL));
+ ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, NULL, error, $4));
+ parse_events_terms__delete($4);
$$ = list;
}
|
-PE_NAME_CACHE_TYPE
+PE_NAME_CACHE_TYPE opt_event_config
{
struct parse_events_evlist *data = _data;
+ struct parse_events_error *error = data->error;
struct list_head *list;
ALLOC_LIST(list);
- ABORT_ON(parse_events_add_cache(list, &data->idx, $1, NULL, NULL));
+ ABORT_ON(parse_events_add_cache(list, &data->idx, $1, NULL, NULL, error, $2));
+ parse_events_terms__delete($2);
$$ = list;
}
@@ -378,24 +380,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc
}
event_legacy_tracepoint:
-tracepoint_name
-{
- struct parse_events_evlist *data = _data;
- struct parse_events_error *error = data->error;
- struct list_head *list;
-
- ALLOC_LIST(list);
- if (error)
- error->idx = @1.first_column;
-
- if (parse_events_add_tracepoint(list, &data->idx, $1.sys, $1.event,
- error, NULL))
- return -1;
-
- $$ = list;
-}
-|
-tracepoint_name '/' event_config '/'
+tracepoint_name opt_event_config
{
struct parse_events_evlist *data = _data;
struct parse_events_error *error = data->error;
@@ -406,7 +391,7 @@ tracepoint_name '/' event_config '/'
error->idx = @1.first_column;
if (parse_events_add_tracepoint(list, &data->idx, $1.sys, $1.event,
- error, $3))
+ error, $2))
return -1;
$$ = list;
@@ -433,49 +418,68 @@ PE_NAME ':' PE_NAME
}
event_legacy_numeric:
-PE_VALUE ':' PE_VALUE
+PE_VALUE ':' PE_VALUE opt_event_config
{
struct parse_events_evlist *data = _data;
struct list_head *list;
ALLOC_LIST(list);
- ABORT_ON(parse_events_add_numeric(data, list, (u32)$1, $3, NULL));
+ ABORT_ON(parse_events_add_numeric(data, list, (u32)$1, $3, $4));
+ parse_events_terms__delete($4);
$$ = list;
}
event_legacy_raw:
-PE_RAW
+PE_RAW opt_event_config
{
struct parse_events_evlist *data = _data;
struct list_head *list;
ALLOC_LIST(list);
- ABORT_ON(parse_events_add_numeric(data, list, PERF_TYPE_RAW, $1, NULL));
+ ABORT_ON(parse_events_add_numeric(data, list, PERF_TYPE_RAW, $1, $2));
+ parse_events_terms__delete($2);
$$ = list;
}
event_bpf_file:
-PE_BPF_OBJECT
+PE_BPF_OBJECT opt_event_config
{
struct parse_events_evlist *data = _data;
struct parse_events_error *error = data->error;
struct list_head *list;
ALLOC_LIST(list);
- ABORT_ON(parse_events_load_bpf(data, list, $1, false));
+ ABORT_ON(parse_events_load_bpf(data, list, $1, false, $2));
+ parse_events_terms__delete($2);
$$ = list;
}
|
-PE_BPF_SOURCE
+PE_BPF_SOURCE opt_event_config
{
struct parse_events_evlist *data = _data;
struct list_head *list;
ALLOC_LIST(list);
- ABORT_ON(parse_events_load_bpf(data, list, $1, true));
+ ABORT_ON(parse_events_load_bpf(data, list, $1, true, $2));
+ parse_events_terms__delete($2);
$$ = list;
}
+opt_event_config:
+'/' event_config '/'
+{
+ $$ = $2;
+}
+|
+'/' '/'
+{
+ $$ = NULL;
+}
+|
+{
+ $$ = NULL;
+}
+
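With opt_event_config in place, every legacy event style above takes an optional /.../ term list, or none at all. A few illustrative event specs follow, written as C string literals; which terms each event type actually accepts is beyond what the grammar itself shows and is assumed here:

	/* illustrative only; term validity depends on the event type */
	const char *event_spec_examples[] = {
		"cycles/name=mycycles/",                /* value_sym '/' event_config '/' */
		"L1-dcache-load-misses/period=100000/", /* legacy cache + opt_event_config */
		"r1a8//",                               /* raw event, empty config */
		"sched:sched_switch",                   /* tracepoint, config omitted */
	};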
start_terms: event_config
{
struct parse_events_terms *data = _data;
@@ -573,6 +577,86 @@ PE_TERM
ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1, &@1, NULL));
$$ = term;
}
+|
+PE_NAME array '=' PE_NAME
+{
+ struct parse_events_term *term;
+
+ ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
+ $1, $4, &@1, &@4));
+
+ term->array = $2;
+ $$ = term;
+}
+|
+PE_NAME array '=' PE_VALUE
+{
+ struct parse_events_term *term;
+
+ ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+ $1, $4, &@1, &@4));
+ term->array = $2;
+ $$ = term;
+}
+
+array:
+'[' array_terms ']'
+{
+ $$ = $2;
+}
+|
+PE_ARRAY_ALL
+{
+ $$.nr_ranges = 0;
+ $$.ranges = NULL;
+}
+
+array_terms:
+array_terms ',' array_term
+{
+ struct parse_events_array new_array;
+
+ new_array.nr_ranges = $1.nr_ranges + $3.nr_ranges;
+ new_array.ranges = malloc(sizeof(new_array.ranges[0]) *
+ new_array.nr_ranges);
+ ABORT_ON(!new_array.ranges);
+ memcpy(&new_array.ranges[0], $1.ranges,
+ $1.nr_ranges * sizeof(new_array.ranges[0]));
+ memcpy(&new_array.ranges[$1.nr_ranges], $3.ranges,
+ $3.nr_ranges * sizeof(new_array.ranges[0]));
+ free($1.ranges);
+ free($3.ranges);
+ $$ = new_array;
+}
+|
+array_term
+
+array_term:
+PE_VALUE
+{
+ struct parse_events_array array;
+
+ array.nr_ranges = 1;
+ array.ranges = malloc(sizeof(array.ranges[0]));
+ ABORT_ON(!array.ranges);
+ array.ranges[0].start = $1;
+ array.ranges[0].length = 1;
+ $$ = array;
+}
+|
+PE_VALUE PE_ARRAY_RANGE PE_VALUE
+{
+ struct parse_events_array array;
+
+ ABORT_ON($3 < $1);
+ array.nr_ranges = 1;
+ array.ranges = malloc(sizeof(array.ranges[0]));
+ ABORT_ON(!array.ranges);
+ array.ranges[0].start = $1;
+ array.ranges[0].length = $3 - $1 + 1;
+ $$ = array;
+}
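To make the range bookkeeping above concrete, here is a standalone sketch of the objects array_term builds, assuming PE_ARRAY_RANGE is the lexer token for a "..." range (the field names mirror struct parse_events_array):

	#include <stdio.h>

	struct range { unsigned long long start, length; };

	int main(void)
	{
		struct range single = { 3, 1 };         /* "[3]": one index    */
		struct range span = { 3, 5 - 3 + 1 };   /* "[3...5]": length 3 */

		/* "[1,3...5]" would produce both, concatenated by array_terms */
		printf("%llu+%llu, %llu+%llu\n",
		       single.start, single.length, span.start, span.length);
		return 0;
	}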
sep_dc: ':' |
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index b597bcc8fc78..adef23b1352e 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -98,7 +98,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
char scale[128];
int fd, ret = -1;
char path[PATH_MAX];
- const char *lc;
+ char *lc;
snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
@@ -124,6 +124,17 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
lc = setlocale(LC_NUMERIC, NULL);
/*
+ * The lc string may be allocated in static storage,
+ * so get a dynamic copy to make it survive setlocale
+ * call below.
+ */
+ lc = strdup(lc);
+ if (!lc) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /*
* force to C locale to ensure kernel
* scale string is converted correctly.
* kernel uses default C locale.
@@ -135,6 +146,8 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
/* restore locale */
setlocale(LC_NUMERIC, lc);
+ free(lc);
+
ret = 0;
error:
close(fd);
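The comment in the hunk above is the key point: setlocale() may return a pointer into static storage that the next setlocale() call overwrites, so the old locale must be copied before switching. A minimal standalone sketch of the save/restore pattern the patch adopts:

	#include <locale.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		/* setlocale() may hand back static storage, so copy it first */
		char *saved = strdup(setlocale(LC_NUMERIC, NULL));

		if (!saved)
			return 1;

		setlocale(LC_NUMERIC, "C");     /* parse with '.' as decimal point */
		printf("parsed: %f\n", strtod("0.123", NULL));
		setlocale(LC_NUMERIC, saved);   /* restore the caller's locale */
		free(saved);
		return 0;
	}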
@@ -153,7 +166,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
if (fd == -1)
return -1;
- sret = read(fd, alias->unit, UNIT_MAX_LEN);
+ sret = read(fd, alias->unit, UNIT_MAX_LEN);
if (sret < 0)
goto error;
@@ -284,13 +297,12 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
{
struct dirent *evt_ent;
DIR *event_dir;
- int ret = 0;
event_dir = opendir(dir);
if (!event_dir)
return -EINVAL;
- while (!ret && (evt_ent = readdir(event_dir))) {
+ while ((evt_ent = readdir(event_dir))) {
char path[PATH_MAX];
char *name = evt_ent->d_name;
FILE *file;
@@ -306,17 +318,19 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
snprintf(path, PATH_MAX, "%s/%s", dir, name);
- ret = -EINVAL;
file = fopen(path, "r");
- if (!file)
- break;
+ if (!file) {
+ pr_debug("Cannot open %s\n", path);
+ continue;
+ }
- ret = perf_pmu__new_alias(head, dir, name, file);
+ if (perf_pmu__new_alias(head, dir, name, file) < 0)
+ pr_debug("Cannot set up %s\n", name);
fclose(file);
}
closedir(event_dir);
- return ret;
+ return 0;
}
/*
@@ -354,7 +368,7 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
list_for_each_entry(term, &alias->terms, list) {
ret = parse_events_term__clone(&cloned, term);
if (ret) {
- parse_events__free_terms(&list);
+ parse_events_terms__purge(&list);
return ret;
}
list_add_tail(&cloned->list, &list);
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 544509c159ce..b3aabc0d4eb0 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -187,6 +187,9 @@ static void define_event_symbols(struct event_format *event,
const char *ev_name,
struct print_arg *args)
{
+ if (args == NULL)
+ return;
+
switch (args->type) {
case PRINT_NULL:
break;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index d72fafc1c800..fbd05242b4e5 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -205,6 +205,9 @@ static void define_event_symbols(struct event_format *event,
const char *ev_name,
struct print_arg *args)
{
+ if (args == NULL)
+ return;
+
switch (args->type) {
case PRINT_NULL:
break;
@@ -1091,8 +1094,6 @@ static int python_start_script(const char *script, int argc, const char **argv)
goto error;
}
- free(command_line);
-
set_table_handlers(tables);
if (tables->db_export_mode) {
@@ -1101,6 +1102,8 @@ static int python_start_script(const char *script, int argc, const char **argv)
goto error;
}
+ free(command_line);
+
return err;
error:
Py_Finalize();
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 40b7a0d0905b..60b3593d210d 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -240,14 +240,6 @@ static int process_event_stub(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int process_build_id_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *session __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct ordered_events *oe __maybe_unused)
@@ -260,23 +252,6 @@ static int process_finished_round(struct perf_tool *tool,
union perf_event *event,
struct ordered_events *oe);
-static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *perf_session
- __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static int process_event_auxtrace_info_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *session __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
static int skipn(int fd, off_t n)
{
char buf[4096];
@@ -303,10 +278,9 @@ static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
return event->auxtrace.size;
}
-static
-int process_event_auxtrace_error_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *session __maybe_unused)
+static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_session *session __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
@@ -410,7 +384,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
if (tool->tracing_data == NULL)
tool->tracing_data = process_event_synth_tracing_data_stub;
if (tool->build_id == NULL)
- tool->build_id = process_build_id_stub;
+ tool->build_id = process_event_op2_stub;
if (tool->finished_round == NULL) {
if (tool->ordered_events)
tool->finished_round = process_finished_round;
@@ -418,13 +392,13 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->finished_round = process_finished_round_stub;
}
if (tool->id_index == NULL)
- tool->id_index = process_id_index_stub;
+ tool->id_index = process_event_op2_stub;
if (tool->auxtrace_info == NULL)
- tool->auxtrace_info = process_event_auxtrace_info_stub;
+ tool->auxtrace_info = process_event_op2_stub;
if (tool->auxtrace == NULL)
tool->auxtrace = process_event_auxtrace_stub;
if (tool->auxtrace_error == NULL)
- tool->auxtrace_error = process_event_auxtrace_error_stub;
+ tool->auxtrace_error = process_event_op2_stub;
if (tool->thread_map == NULL)
tool->thread_map = process_event_thread_map_stub;
if (tool->cpu_map == NULL)
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 1833103768cb..c8680984d2d6 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -22,6 +22,7 @@ cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
+src_perf = getenv('srctree') + '/tools/perf'
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
@@ -30,6 +31,9 @@ libapikfs = getenv('LIBAPI')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
+# use full paths with source files
+ext_sources = map(lambda x: '%s/%s' % (src_perf, x), ext_sources)
+
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index ec722346e6ff..93fa136b0025 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -6,6 +6,7 @@
#include "evsel.h"
#include "evlist.h"
#include <traceevent/event-parse.h>
+#include "mem-events.h"
regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
@@ -25,9 +26,19 @@ int sort__has_parent = 0;
int sort__has_sym = 0;
int sort__has_dso = 0;
int sort__has_socket = 0;
+int sort__has_thread = 0;
+int sort__has_comm = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
-
+/*
+ * Replaces all occurrences of the character used with the:
+ *
+ * -t, --field-separator
+ *
+ * option, which uses a special separator character and doesn't pad with
+ * spaces, replacing all occurrences of this separator in symbol names (and
+ * other output) with a '.' character, so that it is the only invalid
+ * separator.
+*/
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
int n;
@@ -80,10 +91,21 @@ static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
width, width, comm ?: "");
}
+static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
+{
+ const struct thread *th = arg;
+
+ if (type != HIST_FILTER__THREAD)
+ return -1;
+
+ return th && he->thread != th;
+}
+
struct sort_entry sort_thread = {
.se_header = " Pid:Command",
.se_cmp = sort__thread_cmp,
.se_snprintf = hist_entry__thread_snprintf,
+ .se_filter = hist_entry__thread_filter,
.se_width_idx = HISTC_THREAD,
};
@@ -121,6 +143,7 @@ struct sort_entry sort_comm = {
.se_collapse = sort__comm_collapse,
.se_sort = sort__comm_sort,
.se_snprintf = hist_entry__comm_snprintf,
+ .se_filter = hist_entry__thread_filter,
.se_width_idx = HISTC_COMM,
};
@@ -170,10 +193,21 @@ static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}
+static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
+{
+ const struct dso *dso = arg;
+
+ if (type != HIST_FILTER__DSO)
+ return -1;
+
+ return dso && (!he->ms.map || he->ms.map->dso != dso);
+}
+
struct sort_entry sort_dso = {
.se_header = "Shared Object",
.se_cmp = sort__dso_cmp,
.se_snprintf = hist_entry__dso_snprintf,
+ .se_filter = hist_entry__dso_filter,
.se_width_idx = HISTC_DSO,
};
@@ -246,10 +280,8 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
ip - map->unmap_ip(map, sym->start));
- ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
- width - ret, "");
} else {
- ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
+ ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
width - ret,
sym->name);
}
@@ -257,14 +289,9 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
size_t len = BITS_PER_LONG / 4;
ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
len, ip);
- ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
- width - ret, "");
}
- if (ret > width)
- bf[width] = '\0';
-
- return width;
+ return ret;
}
static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
@@ -274,46 +301,56 @@ static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
he->level, bf, size, width);
}
+static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
+{
+ const char *sym = arg;
+
+ if (type != HIST_FILTER__SYMBOL)
+ return -1;
+
+ return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
+}
+
struct sort_entry sort_sym = {
.se_header = "Symbol",
.se_cmp = sort__sym_cmp,
.se_sort = sort__sym_sort,
.se_snprintf = hist_entry__sym_snprintf,
+ .se_filter = hist_entry__sym_filter,
.se_width_idx = HISTC_SYMBOL,
};
/* --sort srcline */
+static char *hist_entry__get_srcline(struct hist_entry *he)
+{
+ struct map *map = he->ms.map;
+
+ if (!map)
+ return SRCLINE_UNKNOWN;
+
+ return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
+ he->ms.sym, true);
+}
+
static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
- if (!left->srcline) {
- if (!left->ms.map)
- left->srcline = SRCLINE_UNKNOWN;
- else {
- struct map *map = left->ms.map;
- left->srcline = get_srcline(map->dso,
- map__rip_2objdump(map, left->ip),
- left->ms.sym, true);
- }
- }
- if (!right->srcline) {
- if (!right->ms.map)
- right->srcline = SRCLINE_UNKNOWN;
- else {
- struct map *map = right->ms.map;
- right->srcline = get_srcline(map->dso,
- map__rip_2objdump(map, right->ip),
- right->ms.sym, true);
- }
- }
+ if (!left->srcline)
+ left->srcline = hist_entry__get_srcline(left);
+ if (!right->srcline)
+ right->srcline = hist_entry__get_srcline(right);
+
return strcmp(right->srcline, left->srcline);
}
static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline);
+ if (!he->srcline)
+ he->srcline = hist_entry__get_srcline(he);
+
+ return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}
struct sort_entry sort_srcline = {
@@ -327,11 +364,14 @@ struct sort_entry sort_srcline = {
static char no_srcfile[1];
-static char *get_srcfile(struct hist_entry *e)
+static char *hist_entry__get_srcfile(struct hist_entry *e)
{
char *sf, *p;
struct map *map = e->ms.map;
+ if (!map)
+ return no_srcfile;
+
sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
e->ms.sym, false, true);
if (!strcmp(sf, SRCLINE_UNKNOWN))
@@ -348,25 +388,21 @@ static char *get_srcfile(struct hist_entry *e)
static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
- if (!left->srcfile) {
- if (!left->ms.map)
- left->srcfile = no_srcfile;
- else
- left->srcfile = get_srcfile(left);
- }
- if (!right->srcfile) {
- if (!right->ms.map)
- right->srcfile = no_srcfile;
- else
- right->srcfile = get_srcfile(right);
- }
+ if (!left->srcfile)
+ left->srcfile = hist_entry__get_srcfile(left);
+ if (!right->srcfile)
+ right->srcfile = hist_entry__get_srcfile(right);
+
return strcmp(right->srcfile, left->srcfile);
}
static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcfile);
+ if (!he->srcfile)
+ he->srcfile = hist_entry__get_srcfile(he);
+
+ return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}
struct sort_entry sort_srcfile = {
@@ -439,10 +475,21 @@ static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}
+static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
+{
+ int sk = *(const int *)arg;
+
+ if (type != HIST_FILTER__SOCKET)
+ return -1;
+
+ return sk >= 0 && he->socket != sk;
+}
+
struct sort_entry sort_socket = {
.se_header = "Socket",
.se_cmp = sort__socket_cmp,
.se_snprintf = hist_entry__socket_snprintf,
+ .se_filter = hist_entry__socket_filter,
.se_width_idx = HISTC_SOCKET,
};
@@ -483,9 +530,6 @@ sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
if (right->trace_output == NULL)
right->trace_output = get_trace_output(right);
- hists__new_col_len(left->hists, HISTC_TRACE, strlen(left->trace_output));
- hists__new_col_len(right->hists, HISTC_TRACE, strlen(right->trace_output));
-
return strcmp(right->trace_output, left->trace_output);
}
@@ -496,11 +540,11 @@ static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
evsel = hists_to_evsel(he->hists);
if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
- return scnprintf(bf, size, "%-*.*s", width, width, "N/A");
+ return scnprintf(bf, size, "%-.*s", width, "N/A");
if (he->trace_output == NULL)
he->trace_output = get_trace_output(he);
- return repsep_snprintf(bf, size, "%-*.*s", width, width, he->trace_output);
+ return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}
struct sort_entry sort_trace = {
@@ -532,6 +576,18 @@ static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
+static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
+ const void *arg)
+{
+ const struct dso *dso = arg;
+
+ if (type != HIST_FILTER__DSO)
+ return -1;
+
+ return dso && (!he->branch_info || !he->branch_info->from.map ||
+ he->branch_info->from.map->dso != dso);
+}
+
static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
@@ -552,6 +608,18 @@ static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
+static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
+ const void *arg)
+{
+ const struct dso *dso = arg;
+
+ if (type != HIST_FILTER__DSO)
+ return -1;
+
+ return dso && (!he->branch_info || !he->branch_info->to.map ||
+ he->branch_info->to.map->dso != dso);
+}
+
static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
@@ -613,10 +681,35 @@ static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}
+static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
+ const void *arg)
+{
+ const char *sym = arg;
+
+ if (type != HIST_FILTER__SYMBOL)
+ return -1;
+
+ return sym && !(he->branch_info && he->branch_info->from.sym &&
+ strstr(he->branch_info->from.sym->name, sym));
+}
+
+static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
+ const void *arg)
+{
+ const char *sym = arg;
+
+ if (type != HIST_FILTER__SYMBOL)
+ return -1;
+
+ return sym && !(he->branch_info && he->branch_info->to.sym &&
+ strstr(he->branch_info->to.sym->name, sym));
+}
+
struct sort_entry sort_dso_from = {
.se_header = "Source Shared Object",
.se_cmp = sort__dso_from_cmp,
.se_snprintf = hist_entry__dso_from_snprintf,
+ .se_filter = hist_entry__dso_from_filter,
.se_width_idx = HISTC_DSO_FROM,
};
@@ -624,6 +717,7 @@ struct sort_entry sort_dso_to = {
.se_header = "Target Shared Object",
.se_cmp = sort__dso_to_cmp,
.se_snprintf = hist_entry__dso_to_snprintf,
+ .se_filter = hist_entry__dso_to_filter,
.se_width_idx = HISTC_DSO_TO,
};
@@ -631,6 +725,7 @@ struct sort_entry sort_sym_from = {
.se_header = "Source Symbol",
.se_cmp = sort__sym_from_cmp,
.se_snprintf = hist_entry__sym_from_snprintf,
+ .se_filter = hist_entry__sym_from_filter,
.se_width_idx = HISTC_SYMBOL_FROM,
};
@@ -638,6 +733,7 @@ struct sort_entry sort_sym_to = {
.se_header = "Target Symbol",
.se_cmp = sort__sym_to_cmp,
.se_snprintf = hist_entry__sym_to_snprintf,
+ .se_filter = hist_entry__sym_to_filter,
.se_width_idx = HISTC_SYMBOL_TO,
};
@@ -797,20 +893,10 @@ sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- const char *out;
- u64 mask = PERF_MEM_LOCK_NA;
+ char out[10];
- if (he->mem_info)
- mask = he->mem_info->data_src.mem_lock;
-
- if (mask & PERF_MEM_LOCK_NA)
- out = "N/A";
- else if (mask & PERF_MEM_LOCK_LOCKED)
- out = "Yes";
- else
- out = "No";
-
- return repsep_snprintf(bf, size, "%-*s", width, out);
+ perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
+ return repsep_snprintf(bf, size, "%.*s", width, out);
}
static int64_t
@@ -832,54 +918,12 @@ sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}
-static const char * const tlb_access[] = {
- "N/A",
- "HIT",
- "MISS",
- "L1",
- "L2",
- "Walker",
- "Fault",
-};
-#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
-
static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char out[64];
- size_t sz = sizeof(out) - 1; /* -1 for null termination */
- size_t l = 0, i;
- u64 m = PERF_MEM_TLB_NA;
- u64 hit, miss;
-
- out[0] = '\0';
-
- if (he->mem_info)
- m = he->mem_info->data_src.mem_dtlb;
-
- hit = m & PERF_MEM_TLB_HIT;
- miss = m & PERF_MEM_TLB_MISS;
-
- /* already taken care of */
- m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
-
- for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
- if (!(m & 0x1))
- continue;
- if (l) {
- strcat(out, " or ");
- l += 4;
- }
- strncat(out, tlb_access[i], sz - l);
- l += strlen(tlb_access[i]);
- }
- if (*out == '\0')
- strcpy(out, "N/A");
- if (hit)
- strncat(out, " hit", sz - l);
- if (miss)
- strncat(out, " miss", sz - l);
+ perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
return repsep_snprintf(bf, size, "%-*s", width, out);
}
@@ -902,61 +946,12 @@ sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}
-static const char * const mem_lvl[] = {
- "N/A",
- "HIT",
- "MISS",
- "L1",
- "LFB",
- "L2",
- "L3",
- "Local RAM",
- "Remote RAM (1 hop)",
- "Remote RAM (2 hops)",
- "Remote Cache (1 hop)",
- "Remote Cache (2 hops)",
- "I/O",
- "Uncached",
-};
-#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
-
static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char out[64];
- size_t sz = sizeof(out) - 1; /* -1 for null termination */
- size_t i, l = 0;
- u64 m = PERF_MEM_LVL_NA;
- u64 hit, miss;
-
- if (he->mem_info)
- m = he->mem_info->data_src.mem_lvl;
-
- out[0] = '\0';
-
- hit = m & PERF_MEM_LVL_HIT;
- miss = m & PERF_MEM_LVL_MISS;
-
- /* already taken care of */
- m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
-
- for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
- if (!(m & 0x1))
- continue;
- if (l) {
- strcat(out, " or ");
- l += 4;
- }
- strncat(out, mem_lvl[i], sz - l);
- l += strlen(mem_lvl[i]);
- }
- if (*out == '\0')
- strcpy(out, "N/A");
- if (hit)
- strncat(out, " hit", sz - l);
- if (miss)
- strncat(out, " miss", sz - l);
+ perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
return repsep_snprintf(bf, size, "%-*s", width, out);
}
@@ -979,51 +974,15 @@ sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}
-static const char * const snoop_access[] = {
- "N/A",
- "None",
- "Miss",
- "Hit",
- "HitM",
-};
-#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
-
static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
char out[64];
- size_t sz = sizeof(out) - 1; /* -1 for null termination */
- size_t i, l = 0;
- u64 m = PERF_MEM_SNOOP_NA;
-
- out[0] = '\0';
-
- if (he->mem_info)
- m = he->mem_info->data_src.mem_snoop;
-
- for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
- if (!(m & 0x1))
- continue;
- if (l) {
- strcat(out, " or ");
- l += 4;
- }
- strncat(out, snoop_access[i], sz - l);
- l += strlen(snoop_access[i]);
- }
-
- if (*out == '\0')
- strcpy(out, "N/A");
+ perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
return repsep_snprintf(bf, size, "%-*s", width, out);
}
-static inline u64 cl_address(u64 address)
-{
- /* return the cacheline of the address */
- return (address & ~(cacheline_size - 1));
-}
-
static int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
@@ -1440,20 +1399,6 @@ struct hpp_sort_entry {
struct sort_entry *se;
};
-bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
-{
- struct hpp_sort_entry *hse_a;
- struct hpp_sort_entry *hse_b;
-
- if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
- return false;
-
- hse_a = container_of(a, struct hpp_sort_entry, hpp);
- hse_b = container_of(b, struct hpp_sort_entry, hpp);
-
- return hse_a->se == hse_b->se;
-}
-
void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
struct hpp_sort_entry *hse;
@@ -1539,8 +1484,56 @@ static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
return sort_fn(a, b);
}
+bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
+{
+ return format->header == __sort__hpp_header;
+}
+
+#define MK_SORT_ENTRY_CHK(key) \
+bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
+{ \
+ struct hpp_sort_entry *hse; \
+ \
+ if (!perf_hpp__is_sort_entry(fmt)) \
+ return false; \
+ \
+ hse = container_of(fmt, struct hpp_sort_entry, hpp); \
+ return hse->se == &sort_ ## key ; \
+}
+
+MK_SORT_ENTRY_CHK(trace)
+MK_SORT_ENTRY_CHK(srcline)
+MK_SORT_ENTRY_CHK(srcfile)
+MK_SORT_ENTRY_CHK(thread)
+MK_SORT_ENTRY_CHK(comm)
+MK_SORT_ENTRY_CHK(dso)
+MK_SORT_ENTRY_CHK(sym)
+
+
+static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
+{
+ struct hpp_sort_entry *hse_a;
+ struct hpp_sort_entry *hse_b;
+
+ if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
+ return false;
+
+ hse_a = container_of(a, struct hpp_sort_entry, hpp);
+ hse_b = container_of(b, struct hpp_sort_entry, hpp);
+
+ return hse_a->se == hse_b->se;
+}
+
+static void hse_free(struct perf_hpp_fmt *fmt)
+{
+ struct hpp_sort_entry *hse;
+
+ hse = container_of(fmt, struct hpp_sort_entry, hpp);
+ free(hse);
+}
+
static struct hpp_sort_entry *
-__sort_dimension__alloc_hpp(struct sort_dimension *sd)
+__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
struct hpp_sort_entry *hse;
@@ -1560,40 +1553,92 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd)
hse->hpp.cmp = __sort__hpp_cmp;
hse->hpp.collapse = __sort__hpp_collapse;
hse->hpp.sort = __sort__hpp_sort;
+ hse->hpp.equal = __sort__hpp_equal;
+ hse->hpp.free = hse_free;
INIT_LIST_HEAD(&hse->hpp.list);
INIT_LIST_HEAD(&hse->hpp.sort_list);
hse->hpp.elide = false;
hse->hpp.len = 0;
hse->hpp.user_len = 0;
+ hse->hpp.level = level;
return hse;
}
-bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
+static void hpp_free(struct perf_hpp_fmt *fmt)
{
- return format->header == __sort__hpp_header;
+ free(fmt);
+}
+
+static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
+ int level)
+{
+ struct perf_hpp_fmt *fmt;
+
+ fmt = memdup(hd->fmt, sizeof(*fmt));
+ if (fmt) {
+ INIT_LIST_HEAD(&fmt->list);
+ INIT_LIST_HEAD(&fmt->sort_list);
+ fmt->free = hpp_free;
+ fmt->level = level;
+ }
+
+ return fmt;
+}
+
+int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
+{
+ struct perf_hpp_fmt *fmt;
+ struct hpp_sort_entry *hse;
+ int ret = -1;
+ int r;
+
+ perf_hpp_list__for_each_format(he->hpp_list, fmt) {
+ if (!perf_hpp__is_sort_entry(fmt))
+ continue;
+
+ hse = container_of(fmt, struct hpp_sort_entry, hpp);
+ if (hse->se->se_filter == NULL)
+ continue;
+
+ /*
+ * A hist entry is filtered out if any sort key in the hpp list
+ * rejects it; filter types that don't match a key are skipped.
+ */
+ r = hse->se->se_filter(he, type, arg);
+ if (r >= 0) {
+ if (ret < 0)
+ ret = 0;
+ ret |= r;
+ }
+ }
+
+ return ret;
}
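The tri-state convention here is easy to miss: each se_filter() returns -1 when the filter type doesn't concern that sort key, 0 when the entry passes, and non-zero when it should be filtered out, and hist_entry__filter() ORs the applicable results. A hypothetical caller, assuming perf's internal headers (HIST_FILTER__THREAD and the ->filtered bitmask exist in the hists code; the helper name is made up):

	static void apply_thread_filter(struct hist_entry *he,
					const struct thread *th)
	{
		int r = hist_entry__filter(he, HIST_FILTER__THREAD, th);

		if (r > 0)              /* some applicable key rejected it */
			he->filtered |= (1 << HIST_FILTER__THREAD);
		else if (r == 0)        /* all applicable keys passed */
			he->filtered &= ~(1 << HIST_FILTER__THREAD);
		/* r < 0: no thread-aware key in the sort list; leave as-is */
	}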
-static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
+static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
+ struct perf_hpp_list *list,
+ int level)
{
- struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
+ struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
if (hse == NULL)
return -1;
- perf_hpp__register_sort_field(&hse->hpp);
+ perf_hpp_list__register_sort_field(list, &hse->hpp);
return 0;
}
-static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
+static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
+ struct perf_hpp_list *list)
{
- struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
+ struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
if (hse == NULL)
return -1;
- perf_hpp__column_register(&hse->hpp);
+ perf_hpp_list__column_register(list, &hse->hpp);
return 0;
}
@@ -1727,6 +1772,9 @@ static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
if (hde->raw_trace)
goto raw_field;
+ if (!he->trace_output)
+ he->trace_output = get_trace_output(he);
+
field = hde->field;
namelen = strlen(field->name);
str = he->trace_output;
@@ -1776,6 +1824,11 @@ static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
+ if (b == NULL) {
+ update_dynamic_len(hde, a);
+ return 0;
+ }
+
field = hde->field;
if (field->flags & FIELD_IS_DYNAMIC) {
unsigned long long dyn;
@@ -1790,9 +1843,6 @@ static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
} else {
offset = field->offset;
size = field->size;
-
- update_dynamic_len(hde, a);
- update_dynamic_len(hde, b);
}
return memcmp(a->raw_data + offset, b->raw_data + offset, size);
@@ -1803,8 +1853,31 @@ bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
return fmt->cmp == __sort__hde_cmp;
}
+static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
+{
+ struct hpp_dynamic_entry *hde_a;
+ struct hpp_dynamic_entry *hde_b;
+
+ if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
+ return false;
+
+ hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
+ hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
+
+ return hde_a->field == hde_b->field;
+}
+
+static void hde_free(struct perf_hpp_fmt *fmt)
+{
+ struct hpp_dynamic_entry *hde;
+
+ hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
+ free(hde);
+}
+
static struct hpp_dynamic_entry *
-__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
+__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
+ int level)
{
struct hpp_dynamic_entry *hde;
@@ -1827,16 +1900,47 @@ __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
hde->hpp.cmp = __sort__hde_cmp;
hde->hpp.collapse = __sort__hde_cmp;
hde->hpp.sort = __sort__hde_cmp;
+ hde->hpp.equal = __sort__hde_equal;
+ hde->hpp.free = hde_free;
INIT_LIST_HEAD(&hde->hpp.list);
INIT_LIST_HEAD(&hde->hpp.sort_list);
hde->hpp.elide = false;
hde->hpp.len = 0;
hde->hpp.user_len = 0;
+ hde->hpp.level = level;
return hde;
}
+struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
+{
+ struct perf_hpp_fmt *new_fmt = NULL;
+
+ if (perf_hpp__is_sort_entry(fmt)) {
+ struct hpp_sort_entry *hse, *new_hse;
+
+ hse = container_of(fmt, struct hpp_sort_entry, hpp);
+ new_hse = memdup(hse, sizeof(*hse));
+ if (new_hse)
+ new_fmt = &new_hse->hpp;
+ } else if (perf_hpp__is_dynamic_entry(fmt)) {
+ struct hpp_dynamic_entry *hde, *new_hde;
+
+ hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
+ new_hde = memdup(hde, sizeof(*hde));
+ if (new_hde)
+ new_fmt = &new_hde->hpp;
+ } else {
+ new_fmt = memdup(fmt, sizeof(*fmt));
+ }
+
+	/* any of the memdup() calls above may have failed */
+	if (new_fmt) {
+		INIT_LIST_HEAD(&new_fmt->list);
+		INIT_LIST_HEAD(&new_fmt->sort_list);
+	}
+
+ return new_fmt;
+}
+
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
char *event_name, *field_name, *opt_name;
@@ -1908,11 +2012,11 @@ static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_nam
static int __dynamic_dimension__add(struct perf_evsel *evsel,
struct format_field *field,
- bool raw_trace)
+ bool raw_trace, int level)
{
struct hpp_dynamic_entry *hde;
- hde = __alloc_dynamic_entry(evsel, field);
+ hde = __alloc_dynamic_entry(evsel, field, level);
if (hde == NULL)
return -ENOMEM;
@@ -1922,14 +2026,14 @@ static int __dynamic_dimension__add(struct perf_evsel *evsel,
return 0;
}
-static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
+static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
int ret;
struct format_field *field;
field = evsel->tp_format->format.fields;
while (field) {
- ret = __dynamic_dimension__add(evsel, field, raw_trace);
+ ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
if (ret < 0)
return ret;
@@ -1938,7 +2042,8 @@ static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
return 0;
}
-static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
+static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
+ int level)
{
int ret;
struct perf_evsel *evsel;
@@ -1947,7 +2052,7 @@ static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
continue;
- ret = add_evsel_fields(evsel, raw_trace);
+ ret = add_evsel_fields(evsel, raw_trace, level);
if (ret < 0)
return ret;
}
@@ -1955,7 +2060,7 @@ static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
}
static int add_all_matching_fields(struct perf_evlist *evlist,
- char *field_name, bool raw_trace)
+ char *field_name, bool raw_trace, int level)
{
int ret = -ESRCH;
struct perf_evsel *evsel;
@@ -1969,14 +2074,15 @@ static int add_all_matching_fields(struct perf_evlist *evlist,
if (field == NULL)
continue;
- ret = __dynamic_dimension__add(evsel, field, raw_trace);
+ ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
if (ret < 0)
break;
}
return ret;
}
-static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
+static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
+ int level)
{
char *str, *event_name, *field_name, *opt_name;
struct perf_evsel *evsel;
@@ -2006,12 +2112,12 @@ static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
}
if (!strcmp(field_name, "trace_fields")) {
- ret = add_all_dynamic_fields(evlist, raw_trace);
+ ret = add_all_dynamic_fields(evlist, raw_trace, level);
goto out;
}
if (event_name == NULL) {
- ret = add_all_matching_fields(evlist, field_name, raw_trace);
+ ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
goto out;
}
@@ -2029,7 +2135,7 @@ static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
}
if (!strcmp(field_name, "*")) {
- ret = add_evsel_fields(evsel, raw_trace);
+ ret = add_evsel_fields(evsel, raw_trace, level);
} else {
field = pevent_find_any_field(evsel->tp_format, field_name);
if (field == NULL) {
@@ -2038,7 +2144,7 @@ static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
return -ENOENT;
}
- ret = __dynamic_dimension__add(evsel, field, raw_trace);
+ ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
}
out:
@@ -2046,12 +2152,14 @@ out:
return ret;
}
-static int __sort_dimension__add(struct sort_dimension *sd)
+static int __sort_dimension__add(struct sort_dimension *sd,
+ struct perf_hpp_list *list,
+ int level)
{
if (sd->taken)
return 0;
- if (__sort_dimension__add_hpp_sort(sd) < 0)
+ if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
return -1;
if (sd->entry->se_collapse)
@@ -2062,46 +2170,63 @@ static int __sort_dimension__add(struct sort_dimension *sd)
return 0;
}
-static int __hpp_dimension__add(struct hpp_dimension *hd)
+static int __hpp_dimension__add(struct hpp_dimension *hd,
+ struct perf_hpp_list *list,
+ int level)
{
- if (!hd->taken) {
- hd->taken = 1;
+ struct perf_hpp_fmt *fmt;
- perf_hpp__register_sort_field(hd->fmt);
- }
+ if (hd->taken)
+ return 0;
+
+ fmt = __hpp_dimension__alloc_hpp(hd, level);
+ if (!fmt)
+ return -1;
+
+ hd->taken = 1;
+ perf_hpp_list__register_sort_field(list, fmt);
return 0;
}
-static int __sort_dimension__add_output(struct sort_dimension *sd)
+static int __sort_dimension__add_output(struct perf_hpp_list *list,
+ struct sort_dimension *sd)
{
if (sd->taken)
return 0;
- if (__sort_dimension__add_hpp_output(sd) < 0)
+ if (__sort_dimension__add_hpp_output(sd, list) < 0)
return -1;
sd->taken = 1;
return 0;
}
-static int __hpp_dimension__add_output(struct hpp_dimension *hd)
+static int __hpp_dimension__add_output(struct perf_hpp_list *list,
+ struct hpp_dimension *hd)
{
- if (!hd->taken) {
- hd->taken = 1;
+ struct perf_hpp_fmt *fmt;
- perf_hpp__column_register(hd->fmt);
- }
+ if (hd->taken)
+ return 0;
+
+ fmt = __hpp_dimension__alloc_hpp(hd, 0);
+ if (!fmt)
+ return -1;
+
+ hd->taken = 1;
+ perf_hpp_list__column_register(list, fmt);
return 0;
}
int hpp_dimension__add_output(unsigned col)
{
BUG_ON(col >= PERF_HPP__MAX_INDEX);
- return __hpp_dimension__add_output(&hpp_sort_dimensions[col]);
+ return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
-static int sort_dimension__add(const char *tok,
- struct perf_evlist *evlist __maybe_unused)
+static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
+ struct perf_evlist *evlist __maybe_unused,
+ int level)
{
unsigned int i;
@@ -2136,9 +2261,13 @@ static int sort_dimension__add(const char *tok,
sort__has_dso = 1;
} else if (sd->entry == &sort_socket) {
sort__has_socket = 1;
+ } else if (sd->entry == &sort_thread) {
+ sort__has_thread = 1;
+ } else if (sd->entry == &sort_comm) {
+ sort__has_comm = 1;
}
- return __sort_dimension__add(sd);
+ return __sort_dimension__add(sd, list, level);
}
for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
@@ -2147,7 +2276,7 @@ static int sort_dimension__add(const char *tok,
if (strncasecmp(tok, hd->name, strlen(tok)))
continue;
- return __hpp_dimension__add(hd);
+ return __hpp_dimension__add(hd, list, level);
}
for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
@@ -2162,7 +2291,7 @@ static int sort_dimension__add(const char *tok,
if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
sort__has_sym = 1;
- __sort_dimension__add(sd);
+ __sort_dimension__add(sd, list, level);
return 0;
}
@@ -2178,16 +2307,60 @@ static int sort_dimension__add(const char *tok,
if (sd->entry == &sort_mem_daddr_sym)
sort__has_sym = 1;
- __sort_dimension__add(sd);
+ __sort_dimension__add(sd, list, level);
return 0;
}
- if (!add_dynamic_entry(evlist, tok))
+ if (!add_dynamic_entry(evlist, tok, level))
return 0;
return -ESRCH;
}
+static int setup_sort_list(struct perf_hpp_list *list, char *str,
+ struct perf_evlist *evlist)
+{
+ char *tmp, *tok;
+ int ret = 0;
+ int level = 0;
+ int next_level = 1;
+ bool in_group = false;
+
+ do {
+ tok = str;
+ tmp = strpbrk(str, "{}, ");
+ if (tmp) {
+ if (in_group)
+ next_level = level;
+ else
+ next_level = level + 1;
+
+ if (*tmp == '{')
+ in_group = true;
+ else if (*tmp == '}')
+ in_group = false;
+
+ *tmp = '\0';
+ str = tmp + 1;
+ }
+
+ if (*tok) {
+ ret = sort_dimension__add(list, tok, evlist, level);
+ if (ret == -EINVAL) {
+ error("Invalid --sort key: `%s'", tok);
+ break;
+ } else if (ret == -ESRCH) {
+ error("Unknown --sort key: `%s'", tok);
+ break;
+ }
+ }
+
+ level = next_level;
+ } while (tmp);
+
+ return ret;
+}
+
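setup_sort_list() introduces brace grouping for hierarchy levels, which is easier to see in isolation. A standalone sketch of the same tokenizer loop (printf stands in for sort_dimension__add()); with the input below it prints comm at level 0, dso and sym sharing one level, and srcline on its own deeper level:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char str[] = "comm,{dso,sym},srcline";
		char *s = str, *tok, *tmp;
		int level = 0, next_level = 1, in_group = 0;

		do {
			tok = s;
			tmp = strpbrk(s, "{}, ");
			if (tmp) {
				/* keys inside "{...}" keep the same level */
				next_level = in_group ? level : level + 1;
				if (*tmp == '{')
					in_group = 1;
				else if (*tmp == '}')
					in_group = 0;
				*tmp = '\0';
				s = tmp + 1;
			}
			if (*tok)
				printf("key '%s' -> level %d\n", tok, level);
			level = next_level;
		} while (tmp);
		return 0;
	}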
static const char *get_default_sort_order(struct perf_evlist *evlist)
{
const char *default_sort_orders[] = {
@@ -2282,7 +2455,7 @@ static char *setup_overhead(char *keys)
static int __setup_sorting(struct perf_evlist *evlist)
{
- char *tmp, *tok, *str;
+ char *str;
const char *sort_keys;
int ret = 0;
@@ -2320,17 +2493,7 @@ static int __setup_sorting(struct perf_evlist *evlist)
}
}
- for (tok = strtok_r(str, ", ", &tmp);
- tok; tok = strtok_r(NULL, ", ", &tmp)) {
- ret = sort_dimension__add(tok, evlist);
- if (ret == -EINVAL) {
- error("Invalid --sort key: `%s'", tok);
- break;
- } else if (ret == -ESRCH) {
- error("Unknown --sort key: `%s'", tok);
- break;
- }
- }
+ ret = setup_sort_list(&perf_hpp_list, str, evlist);
free(str);
return ret;
@@ -2341,7 +2504,7 @@ void perf_hpp__set_elide(int idx, bool elide)
struct perf_hpp_fmt *fmt;
struct hpp_sort_entry *hse;
- perf_hpp__for_each_format(fmt) {
+ perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
if (!perf_hpp__is_sort_entry(fmt))
continue;
@@ -2401,7 +2564,7 @@ void sort__setup_elide(FILE *output)
struct perf_hpp_fmt *fmt;
struct hpp_sort_entry *hse;
- perf_hpp__for_each_format(fmt) {
+ perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
if (!perf_hpp__is_sort_entry(fmt))
continue;
@@ -2413,7 +2576,7 @@ void sort__setup_elide(FILE *output)
* It makes no sense to elide all of sort entries.
* Just revert them to show up again.
*/
- perf_hpp__for_each_format(fmt) {
+ perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
if (!perf_hpp__is_sort_entry(fmt))
continue;
@@ -2421,7 +2584,7 @@ void sort__setup_elide(FILE *output)
return;
}
- perf_hpp__for_each_format(fmt) {
+ perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
if (!perf_hpp__is_sort_entry(fmt))
continue;
@@ -2429,7 +2592,7 @@ void sort__setup_elide(FILE *output)
}
}
-static int output_field_add(char *tok)
+static int output_field_add(struct perf_hpp_list *list, char *tok)
{
unsigned int i;
@@ -2439,7 +2602,7 @@ static int output_field_add(char *tok)
if (strncasecmp(tok, sd->name, strlen(tok)))
continue;
- return __sort_dimension__add_output(sd);
+ return __sort_dimension__add_output(list, sd);
}
for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
@@ -2448,7 +2611,7 @@ static int output_field_add(char *tok)
if (strncasecmp(tok, hd->name, strlen(tok)))
continue;
- return __hpp_dimension__add_output(hd);
+ return __hpp_dimension__add_output(list, hd);
}
for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
@@ -2457,7 +2620,7 @@ static int output_field_add(char *tok)
if (strncasecmp(tok, sd->name, strlen(tok)))
continue;
- return __sort_dimension__add_output(sd);
+ return __sort_dimension__add_output(list, sd);
}
for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
@@ -2466,12 +2629,32 @@ static int output_field_add(char *tok)
if (strncasecmp(tok, sd->name, strlen(tok)))
continue;
- return __sort_dimension__add_output(sd);
+ return __sort_dimension__add_output(list, sd);
}
return -ESRCH;
}
+static int setup_output_list(struct perf_hpp_list *list, char *str)
+{
+ char *tmp, *tok;
+ int ret = 0;
+
+ for (tok = strtok_r(str, ", ", &tmp);
+ tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ ret = output_field_add(list, tok);
+ if (ret == -EINVAL) {
+ error("Invalid --fields key: `%s'", tok);
+ break;
+ } else if (ret == -ESRCH) {
+ error("Unknown --fields key: `%s'", tok);
+ break;
+ }
+ }
+
+ return ret;
+}
+
static void reset_dimensions(void)
{
unsigned int i;
@@ -2496,7 +2679,7 @@ bool is_strict_order(const char *order)
static int __setup_output_field(void)
{
- char *tmp, *tok, *str, *strp;
+ char *str, *strp;
int ret = -EINVAL;
if (field_order == NULL)
@@ -2516,17 +2699,7 @@ static int __setup_output_field(void)
goto out;
}
- for (tok = strtok_r(strp, ", ", &tmp);
- tok; tok = strtok_r(NULL, ", ", &tmp)) {
- ret = output_field_add(tok);
- if (ret == -EINVAL) {
- error("Invalid --fields key: `%s'", tok);
- break;
- } else if (ret == -ESRCH) {
- error("Unknown --fields key: `%s'", tok);
- break;
- }
- }
+ ret = setup_output_list(&perf_hpp_list, strp);
out:
free(str);
@@ -2542,7 +2715,7 @@ int setup_sorting(struct perf_evlist *evlist)
return err;
if (parent_pattern != default_parent_pattern) {
- err = sort_dimension__add("parent", evlist);
+ err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
if (err < 0)
return err;
}
@@ -2560,9 +2733,13 @@ int setup_sorting(struct perf_evlist *evlist)
return err;
/* copy sort keys to output fields */
- perf_hpp__setup_output_field();
+ perf_hpp__setup_output_field(&perf_hpp_list);
/* and then copy output fields to sort keys */
- perf_hpp__append_sort_keys();
+ perf_hpp__append_sort_keys(&perf_hpp_list);
+
+ /* setup hists-specific output fields */
+ if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
+ return -1;
return 0;
}
@@ -2578,5 +2755,5 @@ void reset_output_field(void)
sort_order = NULL;
reset_dimensions();
- perf_hpp__reset_output_field();
+ perf_hpp__reset_output_field(&perf_hpp_list);
}
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 687bbb124428..3f4e35998119 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -32,9 +32,12 @@ extern const char default_sort_order[];
extern regex_t ignore_callees_regex;
extern int have_ignore_callees;
extern int sort__need_collapse;
+extern int sort__has_dso;
extern int sort__has_parent;
extern int sort__has_sym;
extern int sort__has_socket;
+extern int sort__has_thread;
+extern int sort__has_comm;
extern enum sort_mode sort__mode;
extern struct sort_entry sort_comm;
extern struct sort_entry sort_dso;
@@ -94,9 +97,11 @@ struct hist_entry {
s32 socket;
s32 cpu;
u8 cpumode;
+ u8 depth;
/* We are added by hists__add_dummy_entry. */
bool dummy;
+ bool leaf;
char level;
u8 filtered;
@@ -113,18 +118,28 @@ struct hist_entry {
bool init_have_children;
bool unfolded;
bool has_children;
+ bool has_no_entry;
};
};
char *srcline;
char *srcfile;
struct symbol *parent;
- struct rb_root sorted_chain;
struct branch_info *branch_info;
struct hists *hists;
struct mem_info *mem_info;
void *raw_data;
u32 raw_size;
void *trace_output;
+ struct perf_hpp_list *hpp_list;
+ struct hist_entry *parent_he;
+ union {
+ /* this is for hierarchical entry structure */
+ struct {
+ struct rb_root hroot_in;
+ struct rb_root hroot_out;
+ }; /* non-leaf entries */
+ struct rb_root sorted_chain; /* leaf entry has callchains */
+ };
struct callchain_root callchain[0]; /* must be last member */
};
@@ -160,6 +175,17 @@ static inline float hist_entry__get_percent_limit(struct hist_entry *he)
return period * 100.0 / total_period;
}
+static inline u64 cl_address(u64 address)
+{
+ /* return the cacheline of the address */
+ return (address & ~(cacheline_size - 1));
+}
+
+static inline u64 cl_offset(u64 address)
+{
+	/* return the offset within the cacheline */
+ return (address & (cacheline_size - 1));
+}
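A quick worked example of the two helpers (cacheline_size is a runtime-detected global in perf; 64 bytes is assumed here):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t cacheline_size = 64;
		uint64_t addr = 0x1234;

		/* cl_address(): strip the low bits -> 0x1200 */
		printf("line:   %#llx\n",
		       (unsigned long long)(addr & ~(cacheline_size - 1)));
		/* cl_offset(): keep the low bits -> 0x34 */
		printf("offset: %#llx\n",
		       (unsigned long long)(addr & (cacheline_size - 1)));
		return 0;
	}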
enum sort_mode {
SORT_MODE__NORMAL,
@@ -221,6 +247,7 @@ struct sort_entry {
int64_t (*se_sort)(struct hist_entry *, struct hist_entry *);
int (*se_snprintf)(struct hist_entry *he, char *bf, size_t size,
unsigned int width);
+ int (*se_filter)(struct hist_entry *he, int type, const void *arg);
u8 se_width_idx;
};
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 6ac03146889d..b33ffb2af2cf 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -2,6 +2,7 @@
#include "evsel.h"
#include "stat.h"
#include "color.h"
+#include "pmu.h"
enum {
CTX_BIT_USER = 1 << 0,
@@ -14,6 +15,13 @@ enum {
#define NUM_CTX CTX_BIT_MAX
+/*
+ * AGGR_GLOBAL: Use CPU 0
+ * AGGR_SOCKET: Use first CPU of socket
+ * AGGR_CORE: Use first CPU of core
+ * AGGR_NONE: Use matching CPU
+ * AGGR_THREAD: Not supported?
+ */
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[NUM_CTX][MAX_NR_CPUS];
@@ -28,9 +36,15 @@ static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS];
+static bool have_frontend_stalled;
struct stats walltime_nsecs_stats;
+void perf_stat__init_shadow_stats(void)
+{
+ have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
+}
+
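The rest of this file converts direct fprintf() calls into a print_metric callback carried in struct perf_stat_output_ctx, so the same metric logic can feed plain text, CSV, or other backends. A sketch of one such callback, with the argument order (ctx, color, fmt, unit, value) inferred from the call sites below; a NULL fmt marks a metric that was not available for this run:

	#include <stdio.h>

	static void csv_print_metric(void *ctx, const char *color,
				     const char *fmt, const char *unit,
				     double val)
	{
		FILE *fp = ctx;

		(void)color;            /* color is a TTY concern, not CSV's */
		if (!fmt) {             /* metric not available */
			fprintf(fp, ",,%s", unit);
			return;
		}
		fputc(',', fp);
		fprintf(fp, fmt, val);  /* fmt is e.g. "%7.2f%%" */
		fprintf(fp, ",%s", unit);
	}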
static int evsel_context(struct perf_evsel *evsel)
{
int ctx = 0;
@@ -137,9 +151,10 @@ static const char *get_ratio_color(enum grc_type type, double ratio)
return color;
}
-static void print_stalled_cycles_frontend(FILE *out, int cpu,
+static void print_stalled_cycles_frontend(int cpu,
struct perf_evsel *evsel
- __maybe_unused, double avg)
+ __maybe_unused, double avg,
+ struct perf_stat_output_ctx *out)
{
double total, ratio = 0.0;
const char *color;
@@ -152,14 +167,17 @@ static void print_stalled_cycles_frontend(FILE *out, int cpu,
color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);
- fprintf(out, " # ");
- color_fprintf(out, color, "%6.2f%%", ratio);
- fprintf(out, " frontend cycles idle ");
+ if (ratio)
+ out->print_metric(out->ctx, color, "%7.2f%%", "frontend cycles idle",
+ ratio);
+ else
+ out->print_metric(out->ctx, NULL, NULL, "frontend cycles idle", 0);
}
-static void print_stalled_cycles_backend(FILE *out, int cpu,
+static void print_stalled_cycles_backend(int cpu,
struct perf_evsel *evsel
- __maybe_unused, double avg)
+ __maybe_unused, double avg,
+ struct perf_stat_output_ctx *out)
{
double total, ratio = 0.0;
const char *color;
@@ -172,14 +190,13 @@ static void print_stalled_cycles_backend(FILE *out, int cpu,
color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
- fprintf(out, " # ");
- color_fprintf(out, color, "%6.2f%%", ratio);
- fprintf(out, " backend cycles idle ");
+ out->print_metric(out->ctx, color, "%6.2f%%", "backend cycles idle", ratio);
}
-static void print_branch_misses(FILE *out, int cpu,
+static void print_branch_misses(int cpu,
struct perf_evsel *evsel __maybe_unused,
- double avg)
+ double avg,
+ struct perf_stat_output_ctx *out)
{
double total, ratio = 0.0;
const char *color;
@@ -192,14 +209,13 @@ static void print_branch_misses(FILE *out, int cpu,
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
- fprintf(out, " # ");
- color_fprintf(out, color, "%6.2f%%", ratio);
- fprintf(out, " of all branches ");
+ out->print_metric(out->ctx, color, "%7.2f%%", "of all branches", ratio);
}
-static void print_l1_dcache_misses(FILE *out, int cpu,
+static void print_l1_dcache_misses(int cpu,
struct perf_evsel *evsel __maybe_unused,
- double avg)
+ double avg,
+ struct perf_stat_output_ctx *out)
{
double total, ratio = 0.0;
const char *color;
@@ -212,14 +228,13 @@ static void print_l1_dcache_misses(FILE *out, int cpu,
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
- fprintf(out, " # ");
- color_fprintf(out, color, "%6.2f%%", ratio);
- fprintf(out, " of all L1-dcache hits ");
+ out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
}
-static void print_l1_icache_misses(FILE *out, int cpu,
+static void print_l1_icache_misses(int cpu,
struct perf_evsel *evsel __maybe_unused,
- double avg)
+ double avg,
+ struct perf_stat_output_ctx *out)
{
double total, ratio = 0.0;
const char *color;
@@ -231,15 +246,13 @@ static void print_l1_icache_misses(FILE *out, int cpu,
ratio = avg / total * 100.0;
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-
- fprintf(out, " # ");
- color_fprintf(out, color, "%6.2f%%", ratio);
- fprintf(out, " of all L1-icache hits ");
+ out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
}
-static void print_dtlb_cache_misses(FILE *out, int cpu,
+static void print_dtlb_cache_misses(int cpu,
struct perf_evsel *evsel __maybe_unused,
- double avg)
+ double avg,
+ struct perf_stat_output_ctx *out)
{
double total, ratio = 0.0;
const char *color;
@@ -251,15 +264,13 @@ static void print_dtlb_cache_misses(FILE *out, int cpu,
ratio = avg / total * 100.0;
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-
- fprintf(out, " # ");
- color_fprintf(out, color, "%6.2f%%", ratio);
- fprintf(out, " of all dTLB cache hits ");
+ out->print_metric(out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
}
-static void print_itlb_cache_misses(FILE *out, int cpu,
+static void print_itlb_cache_misses(int cpu,
struct perf_evsel *evsel __maybe_unused,
- double avg)
+ double avg,
+ struct perf_stat_output_ctx *out)
{
double total, ratio = 0.0;
const char *color;
@@ -271,15 +282,13 @@ static void print_itlb_cache_misses(FILE *out, int cpu,
ratio = avg / total * 100.0;
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-
- fprintf(out, " # ");
- color_fprintf(out, color, "%6.2f%%", ratio);
- fprintf(out, " of all iTLB cache hits ");
+ out->print_metric(out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
}
-static void print_ll_cache_misses(FILE *out, int cpu,
+static void print_ll_cache_misses(int cpu,
struct perf_evsel *evsel __maybe_unused,
- double avg)
+ double avg,
+ struct perf_stat_output_ctx *out)
{
double total, ratio = 0.0;
const char *color;
@@ -291,15 +300,15 @@ static void print_ll_cache_misses(FILE *out, int cpu,
ratio = avg / total * 100.0;
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-
- fprintf(out, " # ");
- color_fprintf(out, color, "%6.2f%%", ratio);
- fprintf(out, " of all LL-cache hits ");
+ out->print_metric(out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
}
-void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
- double avg, int cpu, enum aggr_mode aggr)
+void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
+ double avg, int cpu,
+ struct perf_stat_output_ctx *out)
{
+ void *ctxp = out->ctx;
+ print_metric_t print_metric = out->print_metric;
double total, ratio = 0.0, total2;
int ctx = evsel_context(evsel);
@@ -307,119 +316,145 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
if (total) {
ratio = avg / total;
- fprintf(out, " # %5.2f insns per cycle ", ratio);
+ print_metric(ctxp, NULL, "%7.2f ",
+ "insn per cycle", ratio);
} else {
- fprintf(out, " ");
+ print_metric(ctxp, NULL, NULL, "insn per cycle", 0);
}
total = avg_stats(&runtime_stalled_cycles_front_stats[ctx][cpu]);
total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[ctx][cpu]));
if (total && avg) {
+ out->new_line(ctxp);
ratio = total / avg;
- fprintf(out, "\n");
- if (aggr == AGGR_NONE)
- fprintf(out, " ");
- fprintf(out, " # %5.2f stalled cycles per insn", ratio);
+ print_metric(ctxp, NULL, "%7.2f ",
+ "stalled cycles per insn",
+ ratio);
+ } else if (have_frontend_stalled) {
+ print_metric(ctxp, NULL, NULL,
+ "stalled cycles per insn", 0);
}
-
- } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
- runtime_branches_stats[ctx][cpu].n != 0) {
- print_branch_misses(out, cpu, evsel, avg);
+ } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
+ if (runtime_branches_stats[ctx][cpu].n != 0)
+ print_branch_misses(cpu, evsel, avg, out);
+ else
+ print_metric(ctxp, NULL, NULL, "of all branches", 0);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
- ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
- runtime_l1_dcache_stats[ctx][cpu].n != 0) {
- print_l1_dcache_misses(out, cpu, evsel, avg);
+ ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
+ if (runtime_l1_dcache_stats[ctx][cpu].n != 0)
+ print_l1_dcache_misses(cpu, evsel, avg, out);
+ else
+ print_metric(ctxp, NULL, NULL, "of all L1-dcache hits", 0);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
- ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
- runtime_l1_icache_stats[ctx][cpu].n != 0) {
- print_l1_icache_misses(out, cpu, evsel, avg);
+ ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
+ if (runtime_l1_icache_stats[ctx][cpu].n != 0)
+ print_l1_icache_misses(cpu, evsel, avg, out);
+ else
+ print_metric(ctxp, NULL, NULL, "of all L1-icache hits", 0);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
- ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
- runtime_dtlb_cache_stats[ctx][cpu].n != 0) {
- print_dtlb_cache_misses(out, cpu, evsel, avg);
+ ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
+ if (runtime_dtlb_cache_stats[ctx][cpu].n != 0)
+ print_dtlb_cache_misses(cpu, evsel, avg, out);
+ else
+ print_metric(ctxp, NULL, NULL, "of all dTLB cache hits", 0);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
- ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
- runtime_itlb_cache_stats[ctx][cpu].n != 0) {
- print_itlb_cache_misses(out, cpu, evsel, avg);
+ ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
+ if (runtime_itlb_cache_stats[ctx][cpu].n != 0)
+ print_itlb_cache_misses(cpu, evsel, avg, out);
+ else
+ print_metric(ctxp, NULL, NULL, "of all iTLB cache hits", 0);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
- ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
- runtime_ll_cache_stats[ctx][cpu].n != 0) {
- print_ll_cache_misses(out, cpu, evsel, avg);
- } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) &&
- runtime_cacherefs_stats[ctx][cpu].n != 0) {
+ ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
+ if (runtime_ll_cache_stats[ctx][cpu].n != 0)
+ print_ll_cache_misses(cpu, evsel, avg, out);
+ else
+ print_metric(ctxp, NULL, NULL, "of all LL-cache hits", 0);
+ } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
total = avg_stats(&runtime_cacherefs_stats[ctx][cpu]);
if (total)
ratio = avg * 100 / total;
- fprintf(out, " # %8.3f %% of all cache refs ", ratio);
-
+ if (runtime_cacherefs_stats[ctx][cpu].n != 0)
+ print_metric(ctxp, NULL, "%8.3f %%",
+ "of all cache refs", ratio);
+ else
+ print_metric(ctxp, NULL, NULL, "of all cache refs", 0);
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
- print_stalled_cycles_frontend(out, cpu, evsel, avg);
+ print_stalled_cycles_frontend(cpu, evsel, avg, out);
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
- print_stalled_cycles_backend(out, cpu, evsel, avg);
+ print_stalled_cycles_backend(cpu, evsel, avg, out);
} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
total = avg_stats(&runtime_nsecs_stats[cpu]);
if (total) {
ratio = avg / total;
- fprintf(out, " # %8.3f GHz ", ratio);
+ print_metric(ctxp, NULL, "%8.3f", "GHz", ratio);
} else {
- fprintf(out, " ");
+ print_metric(ctxp, NULL, NULL, "Ghz", 0);
}
} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
if (total)
- fprintf(out,
- " # %5.2f%% transactional cycles ",
- 100.0 * (avg / total));
+ print_metric(ctxp, NULL,
+ "%7.2f%%", "transactional cycles",
+ 100.0 * (avg / total));
+ else
+ print_metric(ctxp, NULL, NULL, "transactional cycles",
+ 0);
} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
total2 = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
if (total2 < avg)
total2 = avg;
if (total)
- fprintf(out,
- " # %5.2f%% aborted cycles ",
+ print_metric(ctxp, NULL, "%7.2f%%", "aborted cycles",
100.0 * ((total2-avg) / total));
- } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
- runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
+ else
+ print_metric(ctxp, NULL, NULL, "aborted cycles", 0);
+ } else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
if (avg)
ratio = total / avg;
- fprintf(out, " # %8.0f cycles / transaction ", ratio);
- } else if (perf_stat_evsel__is(evsel, ELISION_START) &&
- runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
+ if (runtime_cycles_in_tx_stats[ctx][cpu].n != 0)
+ print_metric(ctxp, NULL, "%8.0f",
+ "cycles / transaction", ratio);
+ else
+ print_metric(ctxp, NULL, NULL, "cycles / transaction",
+ 0);
+ } else if (perf_stat_evsel__is(evsel, ELISION_START)) {
total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
if (avg)
ratio = total / avg;
- fprintf(out, " # %8.0f cycles / elision ", ratio);
+ print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) {
if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
- fprintf(out, " # %8.3f CPUs utilized ", avg / ratio);
+ print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
+ avg / ratio);
else
- fprintf(out, " ");
+ print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
} else if (runtime_nsecs_stats[cpu].n != 0) {
char unit = 'M';
+ char unit_buf[10];
total = avg_stats(&runtime_nsecs_stats[cpu]);
@@ -429,9 +464,9 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
ratio *= 1000;
unit = 'K';
}
-
- fprintf(out, " # %8.3f %c/sec ", ratio, unit);
+ snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
+ print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
} else {
- fprintf(out, " ");
+ print_metric(ctxp, NULL, NULL, NULL, 0);
}
}
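
The stat-shadow refactor above replaces direct fprintf() calls with a caller-supplied
output context, so the same metric computation can feed plain text, CSV, or other
frontends. As a sketch only (the real callbacks live in builtin-stat.c and differ in
detail), a text-mode print_metric/new_line pair wired into the new struct could look
like this; everything except the struct and typedefs added to stat.h is illustrative:

	#include <stdio.h>

	/* sketch: approximate the old " #  <value> <unit>" text layout */
	static void text_print_metric(void *ctx, const char *color, const char *fmt,
				      const char *unit, double val)
	{
		FILE *out = ctx;

		(void)color;			/* colour handling elided in this sketch */
		if (!fmt) {			/* no data: keep the columns aligned */
			fprintf(out, "%43s", "");
			return;
		}
		fprintf(out, " # ");
		fprintf(out, fmt, val);
		fprintf(out, " %s", unit);
	}

	static void text_new_line(void *ctx)
	{
		fputc('\n', (FILE *)ctx);
	}

	/* wiring:
	 *	struct perf_stat_output_ctx out = {
	 *		.ctx		= stdout,
	 *		.print_metric	= text_print_metric,
	 *		.new_line	= text_new_line,
	 *	};
	 *	perf_stat__print_shadow_stats(evsel, avg, cpu, &out);
	 */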
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index afb0c45eba34..4d9b481cf3b6 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -97,7 +97,7 @@ void perf_stat_evsel_id_init(struct perf_evsel *evsel)
}
}
-void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
+static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
int i;
struct perf_stat_evsel *ps = evsel->priv;
@@ -108,7 +108,7 @@ void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
perf_stat_evsel_id_init(evsel);
}
-int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
+static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
evsel->priv = zalloc(sizeof(struct perf_stat_evsel));
if (evsel->priv == NULL)
@@ -117,13 +117,13 @@ int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
return 0;
}
-void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
+static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
zfree(&evsel->priv);
}
-int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
- int ncpus, int nthreads)
+static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
+ int ncpus, int nthreads)
{
struct perf_counts *counts;
@@ -134,13 +134,13 @@ int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
return counts ? 0 : -ENOMEM;
}
-void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
+static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
perf_counts__delete(evsel->prev_raw_counts);
evsel->prev_raw_counts = NULL;
}
-int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
+static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
int ncpus = perf_evsel__nr_cpus(evsel);
int nthreads = thread_map__nr(evsel->threads);
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 086f4e128d63..0150e786ccc7 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -68,21 +68,23 @@ void perf_stat_evsel_id_init(struct perf_evsel *evsel);
extern struct stats walltime_nsecs_stats;
+typedef void (*print_metric_t)(void *ctx, const char *color, const char *fmt,
+			       const char *unit, double val);
+typedef void (*new_line_t)(void *ctx);
+
+void perf_stat__init_shadow_stats(void);
void perf_stat__reset_shadow_stats(void);
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
int cpu);
-void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
- double avg, int cpu, enum aggr_mode aggr);
-
-void perf_evsel__reset_stat_priv(struct perf_evsel *evsel);
-int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel);
-void perf_evsel__free_stat_priv(struct perf_evsel *evsel);
-
-int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
- int ncpus, int nthreads);
-void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel);
+struct perf_stat_output_ctx {
+ void *ctx;
+ print_metric_t print_metric;
+ new_line_t new_line;
+};
-int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw);
+void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
+ double avg, int cpu,
+ struct perf_stat_output_ctx *out);
int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
void perf_evlist__free_stats(struct perf_evlist *evlist);
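
The point of routing everything through print_metric_t is that a new frontend needs
no changes in stat-shadow.c: all call sites pass a format string third and a unit
label fourth, and a NULL format means "no data for this metric". A CSV-flavoured
callback, sketched here for illustration (not the actual --csv implementation),
would simply emit fields instead of padded columns:

	#include <stdio.h>

	/* sketch: emit "value,unit," fields; colour is meaningless in CSV */
	static void csv_print_metric(void *ctx, const char *color, const char *fmt,
				     const char *unit, double val)
	{
		FILE *out = ctx;
		char buf[64];

		(void)color;
		if (!fmt) {			/* keep the field count stable */
			fputs(",,", out);
			return;
		}
		snprintf(buf, sizeof(buf), fmt, val);
		fprintf(out, "%s,%s,", buf, unit);
	}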
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index 25671fa16618..d3d279275432 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -51,30 +51,6 @@ void strbuf_grow(struct strbuf *sb, size_t extra)
ALLOC_GROW(sb->buf, sb->len + extra + 1, sb->alloc);
}
-static void strbuf_splice(struct strbuf *sb, size_t pos, size_t len,
- const void *data, size_t dlen)
-{
- if (pos + len < pos)
- die("you want to use way too much memory");
- if (pos > sb->len)
- die("`pos' is too far after the end of the buffer");
- if (pos + len > sb->len)
- die("`pos + len' is too far after the end of the buffer");
-
- if (dlen >= len)
- strbuf_grow(sb, dlen - len);
- memmove(sb->buf + pos + dlen,
- sb->buf + pos + len,
- sb->len - pos - len);
- memcpy(sb->buf + pos, data, dlen);
- strbuf_setlen(sb, sb->len + dlen - len);
-}
-
-void strbuf_remove(struct strbuf *sb, size_t pos, size_t len)
-{
- strbuf_splice(sb, pos, len, NULL, 0);
-}
-
void strbuf_add(struct strbuf *sb, const void *data, size_t len)
{
strbuf_grow(sb, len);
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
index 529f2f035249..7a32c838884d 100644
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -77,8 +77,6 @@ static inline void strbuf_addch(struct strbuf *sb, int c) {
sb->buf[sb->len] = '\0';
}
-extern void strbuf_remove(struct strbuf *, size_t pos, size_t len);
-
extern void strbuf_add(struct strbuf *, const void *, size_t);
static inline void strbuf_addstr(struct strbuf *sb, const char *s) {
strbuf_add(sb, s, strlen(s));
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 562b8ebeae5b..b1dd68f358fc 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -6,6 +6,7 @@
#include <inttypes.h>
#include "symbol.h"
+#include "demangle-java.h"
#include "machine.h"
#include "vdso.h"
#include <symbol/kallsyms.h>
@@ -1077,6 +1078,8 @@ new_symbol:
demangle_flags = DMGL_PARAMS | DMGL_ANSI;
demangled = bfd_demangle(NULL, elf_name, demangle_flags);
+ if (demangled == NULL)
+ demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
if (demangled != NULL)
elf_name = demangled;
}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index ab02209a7cf3..e7588dc91518 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1466,7 +1466,8 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
* Read the build id if possible. This is required for
* DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
*/
- if (filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
+ if (is_regular_file(name) &&
+ filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
dso__set_build_id(dso, build_id);
/*
@@ -1487,6 +1488,9 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
root_dir, name, PATH_MAX))
continue;
+ if (!is_regular_file(name))
+ continue;
+
/* Name is now the name of the next image to try */
if (symsrc__init(ss, dso, name, symtab_type) < 0)
continue;
@@ -1525,6 +1529,10 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
if (!runtime_ss && syms_ss)
runtime_ss = syms_ss;
+ if (syms_ss && syms_ss->type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
+ if (dso__build_id_is_kmod(dso, name, PATH_MAX))
+ kmod = true;
+
if (syms_ss)
ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, kmod);
else
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index ccd1caa40e11..a937053a0ae0 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -110,7 +110,8 @@ struct symbol_conf {
has_filter,
show_ref_callgraph,
hide_unresolved,
- raw_trace;
+ raw_trace,
+ report_hierarchy;
const char *vmlinux_name,
*kallsyms_name,
*source_prefix,
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
index 802bb868d446..8ae051e0ec79 100644
--- a/tools/perf/util/trace-event.c
+++ b/tools/perf/util/trace-event.c
@@ -10,6 +10,7 @@
#include <linux/err.h>
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
+#include <api/fs/fs.h>
#include "trace-event.h"
#include "machine.h"
#include "util.h"
diff --git a/tools/perf/util/tsc.c b/tools/perf/util/tsc.c
index 4d4210d4e13d..1b741646eed0 100644
--- a/tools/perf/util/tsc.c
+++ b/tools/perf/util/tsc.c
@@ -19,7 +19,7 @@ u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
u64 quot, rem;
quot = cyc >> tc->time_shift;
- rem = cyc & ((1 << tc->time_shift) - 1);
+ rem = cyc & (((u64)1 << tc->time_shift) - 1);
return tc->time_zero + quot * tc->time_mult +
((rem * tc->time_mult) >> tc->time_shift);
}
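
The tsc.c one-liner above fixes an integer-width bug: `1 << tc->time_shift` is
evaluated in plain int, so the mask is truncated (and the shift is undefined
behaviour for shift counts of 31 or more) before it is ever widened to u64.
Casting the 1 to u64 keeps the whole mask computation in 64 bits. A standalone
illustration, with a deliberately large hypothetical shift value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int shift = 32;	/* hypothetical time_shift value */
		uint64_t cyc = 0x0123456789abcdefULL;

		/* old form -- shift evaluated in 32-bit int, UB for shift >= 31:
		 *	uint64_t rem = cyc & ((1 << shift) - 1);
		 */
		uint64_t rem = cyc & (((uint64_t)1 << shift) - 1);	/* fixed form */

		printf("rem = %#llx\n", (unsigned long long)rem);	/* 0x89abcdef */
		return 0;
	}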
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index ead9509835d2..b7766c577b01 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -14,6 +14,7 @@
#include <limits.h>
#include <byteswap.h>
#include <linux/kernel.h>
+#include <linux/log2.h>
#include <unistd.h>
#include "callchain.h"
#include "strlist.h"
@@ -507,54 +508,6 @@ int parse_callchain_record(const char *arg, struct callchain_param *param)
return ret;
}
-int filename__read_str(const char *filename, char **buf, size_t *sizep)
-{
- size_t size = 0, alloc_size = 0;
- void *bf = NULL, *nbf;
- int fd, n, err = 0;
- char sbuf[STRERR_BUFSIZE];
-
- fd = open(filename, O_RDONLY);
- if (fd < 0)
- return -errno;
-
- do {
- if (size == alloc_size) {
- alloc_size += BUFSIZ;
- nbf = realloc(bf, alloc_size);
- if (!nbf) {
- err = -ENOMEM;
- break;
- }
-
- bf = nbf;
- }
-
- n = read(fd, bf + size, alloc_size - size);
- if (n < 0) {
- if (size) {
- pr_warning("read failed %d: %s\n", errno,
- strerror_r(errno, sbuf, sizeof(sbuf)));
- err = 0;
- } else
- err = -errno;
-
- break;
- }
-
- size += n;
- } while (n > 0);
-
- if (!err) {
- *sizep = size;
- *buf = bf;
- } else
- free(bf);
-
- close(fd);
- return err;
-}
-
const char *get_filename_for_perf_kvm(void)
{
const char *filename;
@@ -691,3 +644,66 @@ out:
return tip;
}
+
+bool is_regular_file(const char *file)
+{
+ struct stat st;
+
+ if (stat(file, &st))
+ return false;
+
+ return S_ISREG(st.st_mode);
+}
+
+int fetch_current_timestamp(char *buf, size_t sz)
+{
+ struct timeval tv;
+ struct tm tm;
+ char dt[32];
+
+ if (gettimeofday(&tv, NULL) || !localtime_r(&tv.tv_sec, &tm))
+ return -1;
+
+ if (!strftime(dt, sizeof(dt), "%Y%m%d%H%M%S", &tm))
+ return -1;
+
+ scnprintf(buf, sz, "%s%02u", dt, (unsigned)tv.tv_usec / 10000);
+
+ return 0;
+}
+
+void print_binary(unsigned char *data, size_t len,
+ size_t bytes_per_line, print_binary_t printer,
+ void *extra)
+{
+ size_t i, j, mask;
+
+ if (!printer)
+ return;
+
+ bytes_per_line = roundup_pow_of_two(bytes_per_line);
+ mask = bytes_per_line - 1;
+
+ printer(BINARY_PRINT_DATA_BEGIN, 0, extra);
+ for (i = 0; i < len; i++) {
+ if ((i & mask) == 0) {
+ printer(BINARY_PRINT_LINE_BEGIN, -1, extra);
+ printer(BINARY_PRINT_ADDR, i, extra);
+ }
+
+ printer(BINARY_PRINT_NUM_DATA, data[i], extra);
+
+ if (((i & mask) == mask) || i == len - 1) {
+ for (j = 0; j < mask-(i & mask); j++)
+ printer(BINARY_PRINT_NUM_PAD, -1, extra);
+
+ printer(BINARY_PRINT_SEP, i, extra);
+ for (j = i & ~mask; j <= i; j++)
+ printer(BINARY_PRINT_CHAR_DATA, data[j], extra);
+ for (j = 0; j < mask-(i & mask); j++)
+ printer(BINARY_PRINT_CHAR_PAD, i, extra);
+ printer(BINARY_PRINT_LINE_END, -1, extra);
+ }
+ }
+ printer(BINARY_PRINT_DATA_END, -1, extra);
+}
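
Two of the helpers added above have straightforward call patterns: is_regular_file()
guards against handing directories or special files to loaders, and
fetch_current_timestamp() produces a "%Y%m%d%H%M%S" string plus two sub-second
digits. A usage sketch with a hypothetical caller and path (not perf code):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* as declared in util.h below:
	 *	bool is_regular_file(const char *file);
	 *	int fetch_current_timestamp(char *buf, size_t sz);
	 */
	static void stamp_output(const char *path)	/* hypothetical caller */
	{
		char stamp[32];		/* 14 + 2 digits fits comfortably */

		if (!is_regular_file(path))
			return;		/* skip directories, sockets, FIFOs, ... */

		if (fetch_current_timestamp(stamp, sizeof(stamp)) == 0)
			printf("%s.%s\n", path, stamp);
	}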
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index fe915e616f9b..d0d50cef8b2a 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -82,6 +82,8 @@
extern const char *graph_line;
extern const char *graph_dotted_line;
+extern const char *spaces;
+extern const char *dots;
extern char buildid_dir[];
/* On most systems <limits.h> would have given us this, but
@@ -303,7 +305,6 @@ char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
bool show_sym, bool unwind_inlines);
void free_srcline(char *srcline);
-int filename__read_str(const char *filename, char **buf, size_t *sizep);
int perf_event_paranoid(void);
void mem_bswap_64(void *src, int byte_size);
@@ -343,5 +344,27 @@ int fetch_kernel_version(unsigned int *puint,
#define KVER_PARAM(x) KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x)
const char *perf_tip(const char *dirpath);
+bool is_regular_file(const char *file);
+int fetch_current_timestamp(char *buf, size_t sz);
+
+enum binary_printer_ops {
+ BINARY_PRINT_DATA_BEGIN,
+ BINARY_PRINT_LINE_BEGIN,
+ BINARY_PRINT_ADDR,
+ BINARY_PRINT_NUM_DATA,
+ BINARY_PRINT_NUM_PAD,
+ BINARY_PRINT_SEP,
+ BINARY_PRINT_CHAR_DATA,
+ BINARY_PRINT_CHAR_PAD,
+ BINARY_PRINT_LINE_END,
+ BINARY_PRINT_DATA_END,
+};
+
+typedef void (*print_binary_t)(enum binary_printer_ops,
+ unsigned int val,
+ void *extra);
+void print_binary(unsigned char *data, size_t len,
+ size_t bytes_per_line, print_binary_t printer,
+ void *extra);
#endif /* GIT_COMPAT_UTIL_H */
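
print_binary() walks the buffer and delegates every piece of output to the callback,
so one loop can back a plain hex dump, a trace line, or a scripting binding. As an
illustration, a callback producing a classic hex+ASCII dump might look like the
following; only the enum and typedef above are real, the body is a sketch:

	#include <ctype.h>
	#include <stdio.h>

	static void hexdump_printer(enum binary_printer_ops op, unsigned int val,
				    void *extra)
	{
		(void)extra;

		switch (op) {
		case BINARY_PRINT_ADDR:
			printf("%08x:", val);		/* val is the buffer offset */
			break;
		case BINARY_PRINT_NUM_DATA:
			printf(" %02x", val);
			break;
		case BINARY_PRINT_NUM_PAD:
			printf("   ");			/* keep short last lines aligned */
			break;
		case BINARY_PRINT_SEP:
			printf("  ");
			break;
		case BINARY_PRINT_CHAR_DATA:
			putchar(isprint(val) ? val : '.');
			break;
		case BINARY_PRINT_CHAR_PAD:
			putchar(' ');
			break;
		case BINARY_PRINT_LINE_END:
			putchar('\n');
			break;
		default:	/* BEGIN/END markers unused in this dump */
			break;
		}
	}

	/* usage:  print_binary(buf, len, 16, hexdump_printer, NULL); */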
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 0dac7e05a6ac..3fa94e291d16 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1970,7 +1970,7 @@ int has_config_tdp(unsigned int family, unsigned int model)
}
static void
-dump_cstate_pstate_config_info(family, model)
+dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
{
if (!do_nhm_platform_info)
return;
@@ -2142,7 +2142,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
-double get_tdp(model)
+double get_tdp(unsigned int model)
{
unsigned long long msr;
@@ -2256,7 +2256,7 @@ void rapl_probe(unsigned int family, unsigned int model)
return;
}
-void perf_limit_reasons_probe(family, model)
+void perf_limit_reasons_probe(unsigned int family, unsigned int model)
{
if (!genuine_intel)
return;
@@ -2792,7 +2792,7 @@ void process_cpuid()
perf_limit_reasons_probe(family, model);
if (debug)
- dump_cstate_pstate_config_info();
+ dump_cstate_pstate_config_info(family, model);
if (has_skl_msrs(family, model))
calculate_tsc_tweak();