path: root/tools/perf/util/affinity.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2019-12-02 05:49:57 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>    2019-12-02 05:49:57 +0300
commit    b7fcf31f7036895ca8fc3a30eefffab0e82f75f6 (patch)
tree      de2480af1cb8213dd32f56ab596759a76ec7bd57 /tools/perf/util/affinity.c
parent    72c0870e3a05d9cd5466d08c3d2a3069ed0a2f9f (diff)
parent    e680a41fcaf07ccac8817c589fc4824988b48eac (diff)
download  linux-b7fcf31f7036895ca8fc3a30eefffab0e82f75f6.tar.xz
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:

 - Make /sys/devices/cpu/rdpmc based RDPMC enforcement more instantaneous

 - decoder: Update the Intel opcode map

 - Various tooling fixes, including a few late optimizations and cleanups.

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  perf script: Fix invalid LBR/binary mismatch error
  perf script: Fix brstackinsn for AUXTRACE
  perf affinity: Add infrastructure to save/restore affinity
  perf pmu: Use file system cache to optimize sysfs access
  perf regs: Make perf_reg_name() return "unknown" instead of NULL
  perf diff: Use llabs() with 64-bit values
  perf diff: Use llabs() with 64-bit values
  perf/x86: Implement immediate enforcement of /sys/devices/cpu/rdpmc value of 0
  perf tools: Allow to link with libbpf dynamicaly
  perf tests: Rename tests/map_groups.c to tests/maps.c
  perf tests: Rename thread-mg-share to thread-maps-share
  perf maps: Rename map_groups.h to maps.h
  perf maps: Rename 'mg' variables to 'maps'
  perf map_symbol: Rename ms->mg to ms->maps
  perf addr_location: Rename al->mg to al->maps
  perf thread: Rename thread->mg to thread->maps
  perf maps: Merge 'struct maps' with 'struct map_groups'
  x86/insn: perf tools: Add some more instructions to the new instructions test
  x86/insn: Add some more Intel instructions to the opcode map
  perf map: Remove unused functions
  ...
Diffstat (limited to 'tools/perf/util/affinity.c')
-rw-r--r--    tools/perf/util/affinity.c    73
1 file changed, 73 insertions, 0 deletions
diff --git a/tools/perf/util/affinity.c b/tools/perf/util/affinity.c
new file mode 100644
index 000000000000..a5e31f826828
--- /dev/null
+++ b/tools/perf/util/affinity.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Manage affinity to optimize IPIs inside the kernel perf API. */
+#define _GNU_SOURCE 1
+#include <sched.h>
+#include <stdlib.h>
+#include <linux/bitmap.h>
+#include <linux/zalloc.h>
+#include "perf.h"
+#include "cpumap.h"
+#include "affinity.h"
+
+static int get_cpu_set_size(void)
+{
+ int sz = cpu__max_cpu() + 8 - 1;
+ /*
+ * sched_getaffinity() rejects masks smaller than the kernel's own
+ * cpumask size, so round up to at least 4096 bits and hope that's enough.
+ */
+ if (sz < 4096)
+ sz = 4096;
+ return sz / 8;
+}
+
+int affinity__setup(struct affinity *a)
+{
+ int cpu_set_size = get_cpu_set_size();
+
+ a->orig_cpus = bitmap_alloc(cpu_set_size * 8);
+ if (!a->orig_cpus)
+ return -1;
+ sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
+ a->sched_cpus = bitmap_alloc(cpu_set_size * 8);
+ if (!a->sched_cpus) {
+ zfree(&a->orig_cpus);
+ return -1;
+ }
+ bitmap_zero((unsigned long *)a->sched_cpus, cpu_set_size);
+ a->changed = false;
+ return 0;
+}
+
+/*
+ * perf_event_open does an IPI internally to the target CPU.
+ * It is more efficient to change perf's affinity to the target
+ * CPU and then set up all events on that CPU, so we amortize
+ * CPU communication.
+ */
+void affinity__set(struct affinity *a, int cpu)
+{
+ int cpu_set_size = get_cpu_set_size();
+
+ if (cpu == -1)
+ return;
+ a->changed = true;
+ set_bit(cpu, a->sched_cpus);
+ /*
+ * We ignore errors because affinity is just an optimization;
+ * errors can happen, for example, with isolated CPUs or cpusets.
+ * In that case the IPIs inside the kernel's perf API still work.
+ */
+ sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
+ clear_bit(cpu, a->sched_cpus);
+}
+
+void affinity__cleanup(struct affinity *a)
+{
+ int cpu_set_size = get_cpu_set_size();
+
+ if (a->changed)
+ sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
+ zfree(&a->sched_cpus);
+ zfree(&a->orig_cpus);
+}