author    Namhyung Kim <namhyung@kernel.org>    2024-04-30 01:57:38 +0300
committer Arnaldo Carvalho de Melo <acme@redhat.com>    2024-05-02 22:35:47 +0300
commit    3cdd98b42d212160d7aae746a97960a4595cbfc2 (patch)
tree      f1dab900e645c094567b21e735656f45abe17bf9 /tools/perf/util
parent    e3123079b906dc2edb80466de85b2c333a56fbf2 (diff)
perf maps: Remove check_invariants() from maps__lock()
I found that the debug build was slowed down a lot by the maps lock
code, since it checks the invariants whenever it gets the pointer to
the lock. That means the invariants are checked twice per critical
section: before and after the access. Instead, let's move the checking
code inside the locked region, but after any modification, and remove
it from the read paths. This would remove (more than) half of the maps
lock overhead.

The time for 'perf report' with a huge data file (200k+ MMAP2 events):

  Non-debug  Before   After
  ---------  -------  -------
   2m 43s    6m 45s   4m 21s

Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240429225738.1491791-1-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
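For illustration only, here is a minimal standalone sketch of the
pattern this patch applies, using POSIX rwlocks instead of the perf
rw_semaphore wrappers; maps_like, writer_modify() and reader_lookup()
are hypothetical names, not the perf sources:

  /*
   * Sketch of the locking pattern: the lock accessor is a plain
   * getter, so read paths pay no invariant-checking cost; write
   * paths validate once, after the modification, while still
   * holding the lock.
   */
  #include <assert.h>
  #include <pthread.h>

  struct maps_like {
          pthread_rwlock_t lock;
          int nr_entries;             /* state covered by the invariant */
  };

  /* After the patch: a plain accessor, no check_invariants() here. */
  static pthread_rwlock_t *maps_like__lock(struct maps_like *m)
  {
          return &m->lock;
  }

  static void check_invariants(struct maps_like *m)
  {
          assert(m->nr_entries >= 0); /* stand-in for the real checks */
  }

  /* Write path: modify, then check once, still under the write lock. */
  static void writer_modify(struct maps_like *m)
  {
          pthread_rwlock_wrlock(maps_like__lock(m));
          m->nr_entries++;            /* the modification */
          check_invariants(m);
          pthread_rwlock_unlock(maps_like__lock(m));
  }

  /* Read path: no invariant checking at all. */
  static int reader_lookup(struct maps_like *m)
  {
          int n;

          pthread_rwlock_rdlock(maps_like__lock(m));
          n = m->nr_entries;
          pthread_rwlock_unlock(maps_like__lock(m));
          return n;
  }

  int main(void)
  {
          struct maps_like m = {
                  .lock = PTHREAD_RWLOCK_INITIALIZER,
                  .nr_entries = 0,
          };

          writer_modify(&m);
          return reader_lookup(&m) == 1 ? 0 : 1;
  }

Before the patch, the equivalent of maps_like__lock() called
check_invariants() itself, so every acquire/release pair paid for two
full invariant scans even on read-only paths; the hunks below move
that single check to the end of each write-side critical section.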
Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/maps.c  16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
index ce13145a9f8e..ac9fb880ddc7 100644
--- a/tools/perf/util/maps.c
+++ b/tools/perf/util/maps.c
@@ -211,11 +211,6 @@ void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libun
static struct rw_semaphore *maps__lock(struct maps *maps)
{
- /*
- * When the lock is acquired or released the maps invariants should
- * hold.
- */
- check_invariants(maps);
return &RC_CHK_ACCESS(maps)->lock;
}
@@ -358,6 +353,7 @@ static int map__strcmp(const void *a, const void *b)
static int maps__sort_by_name(struct maps *maps)
{
int err = 0;
+
down_write(maps__lock(maps));
if (!maps__maps_by_name_sorted(maps)) {
struct map **maps_by_name = maps__maps_by_name(maps);
@@ -384,6 +380,7 @@ static int maps__sort_by_name(struct maps *maps)
maps__set_maps_by_name_sorted(maps, true);
}
}
+ check_invariants(maps);
up_write(maps__lock(maps));
return err;
}
@@ -502,6 +499,7 @@ int maps__insert(struct maps *maps, struct map *map)
down_write(maps__lock(maps));
ret = __maps__insert(maps, map);
+ check_invariants(maps);
up_write(maps__lock(maps));
return ret;
}
@@ -536,6 +534,7 @@ void maps__remove(struct maps *maps, struct map *map)
{
down_write(maps__lock(maps));
__maps__remove(maps, map);
+ check_invariants(maps);
up_write(maps__lock(maps));
}
@@ -602,6 +601,7 @@ void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data
else
i++;
}
+ check_invariants(maps);
up_write(maps__lock(maps));
}
@@ -942,6 +942,8 @@ int maps__copy_from(struct maps *dest, struct maps *parent)
map__put(new);
}
}
+ check_invariants(dest);
+
up_read(maps__lock(parent));
up_write(maps__lock(dest));
return err;
@@ -1097,6 +1099,7 @@ void maps__fixup_end(struct maps *maps)
map__set_end(maps_by_address[n - 1], ~0ULL);
RC_CHK_ACCESS(maps)->ends_broken = false;
+ check_invariants(maps);
up_write(maps__lock(maps));
}
@@ -1147,6 +1150,8 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
/* No overlap so regular insert suffices. */
int ret = __maps__insert(kmaps, new_map);
+
+ check_invariants(kmaps);
up_write(maps__lock(kmaps));
return ret;
}
@@ -1184,6 +1189,7 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
map__zput(kmaps_maps_by_address[i]);
free(kmaps_maps_by_address);
+ check_invariants(kmaps);
up_write(maps__lock(kmaps));
return 0;
}