author     Ian Rogers <irogers@google.com>                2024-05-05 00:37:57 +0300
committer  Arnaldo Carvalho de Melo <acme@redhat.com>     2024-05-06 15:13:11 +0300
commit     3f4ac23a990853ab5012037767281dfd4beb4b15 (patch)
tree       04b24f1ce0a7b60a7c8f667d7aed418c8d78874c /tools/perf
parent     77a70f80751da3a673232e7bff3f71d8c3995eff (diff)
download   linux-3f4ac23a990853ab5012037767281dfd4beb4b15.tar.xz
perf dsos: Switch backing storage to array from rbtree/list
DSOs were held on a list for fast iteration and in an rbtree for fast
finds. Switch to using a lazily sorted array, where iteration is just a
walk over the array and binary searches have the same complexity as
searching the rbtree. A find may need to sort the array first, which
does increase its complexity, but add operations have lower complexity
and overall the complexity should remain about the same.

The set-name operations on the dso just record that the array is no
longer sorted, avoiding the complexity of rebalancing the rbtree.
Tighter locking discipline is enforced to avoid the array being
resorted while long and short names or ids are changed.

The array is also smaller, replacing 6 pointers with 2, so even though
the over-allocated array may be up to 50% unoccupied, the memory saving
should be at least 2x.

Committer testing:

On a previous version of this patchset we were getting a lot of
warnings about deleting a DSO still on a list, now it is ok:

  root@x1:~# perf probe -l
  root@x1:~# perf probe finish_task_switch
  Added new event:
    probe:finish_task_switch    (on finish_task_switch)

  You can now use it in all perf tools, such as:

          perf record -e probe:finish_task_switch -aR sleep 1

  root@x1:~# perf probe -l
    probe:finish_task_switch (on finish_task_switch@kernel/sched/core.c)
  root@x1:~# perf trace -e probe:finish_task_switch/max-stack=8/ --max-events=1
       0.000 migration/0/19 probe:finish_task_switch(__probe_ip: -1894408688)
                                       finish_task_switch.isra.0 ([kernel.kallsyms])
                                       __schedule ([kernel.kallsyms])
                                       schedule ([kernel.kallsyms])
                                       smpboot_thread_fn ([kernel.kallsyms])
                                       kthread ([kernel.kallsyms])
                                       ret_from_fork ([kernel.kallsyms])
                                       ret_from_fork_asm ([kernel.kallsyms])
  root@x1:~#
  root@x1:~# perf probe -d probe:*
  Removed event: probe:finish_task_switch
  root@x1:~# perf probe -l
  root@x1:~#

I also ran the full 'perf test' suite after applying this one, no
regressions.

Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ahelenia ZiemiaƄska <nabijaczleweli@nabijaczleweli.xyz>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Ben Gainey <ben.gainey@arm.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Chengen Du <chengen.du@canonical.com>
Cc: Colin Ian King <colin.i.king@gmail.com>
Cc: Dima Kogan <dima@secretsauce.net>
Cc: Ilkka Koskinen <ilkka@os.amperecomputing.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linux.dev>
Cc: Li Dong <lidong@vivo.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paran Lee <p4ranlee@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <song@kernel.org>
Cc: Sun Haiyong <sunhaiyong@loongson.cn>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Tiezhu Yang <yangtiezhu@loongson.cn>
Cc: Yanteng Si <siyanteng@loongson.cn>
Cc: zhaimingbing <zhaimingbing@cmss.chinamobile.com>
Link: https://lore.kernel.org/r/20240504213803.218974-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
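[Editor's illustration] As a rough, standalone sketch of the lazily sorted
array idea described above — not the perf code itself: the names lazy_array
and entry are made up, and the real dsos code additionally compares dso_id
and short name and takes dsos->lock around these operations, as the diff
below shows:

	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	struct entry {
		const char *name;
	};

	struct lazy_array {
		struct entry **entries;	/* grown by doubling, like dsos->dsos */
		unsigned int cnt;
		unsigned int allocated;
		bool sorted;		/* starts true for an empty array; cleared
					 * when an append or rename may break order */
	};

	static int entry__cmp(const void *va, const void *vb)
	{
		const struct entry *a = *(const struct entry * const *)va;
		const struct entry *b = *(const struct entry * const *)vb;

		return strcmp(a->name, b->name);
	}

	/* Amortized O(1) append: no re-sorting, just note whether order survived. */
	static int lazy_array__add(struct lazy_array *la, struct entry *e)
	{
		if (la->cnt == la->allocated) {
			unsigned int to_allocate = la->allocated ? la->allocated * 2 : 2;
			struct entry **tmp = realloc(la->entries, sizeof(*tmp) * to_allocate);

			if (!tmp)
				return -1;
			la->entries = tmp;
			la->allocated = to_allocate;
		}
		la->entries[la->cnt++] = e;
		if (la->cnt >= 2 && la->sorted)
			la->sorted = entry__cmp(&la->entries[la->cnt - 2],
						&la->entries[la->cnt - 1]) <= 0;
		return 0;
	}

	/* Renames never touch the array; they only invalidate the sort order. */
	static void lazy_array__rename(struct lazy_array *la, struct entry *e, const char *name)
	{
		e->name = name;
		la->sorted = false;
	}

	/* Sort lazily (only when needed), then binary search by name. */
	static struct entry *lazy_array__find(struct lazy_array *la, const char *name)
	{
		int low = 0, high = (int)la->cnt - 1;

		if (!la->sorted) {
			qsort(la->entries, la->cnt, sizeof(*la->entries), entry__cmp);
			la->sorted = true;
		}
		while (low <= high) {
			int mid = (low + high) / 2;
			int rc = strcmp(name, la->entries[mid]->name);

			if (rc == 0)
				return la->entries[mid];
			if (rc < 0)
				high = mid - 1;
			else
				low = mid + 1;
		}
		return NULL;
	}

The append stays cheap and only records whether ordering survived; the cost
of sorting is paid lazily by the next find, which is the trade-off the commit
message describes.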
Diffstat (limited to 'tools/perf')
-rw-r--r--  tools/perf/util/dso.c    67
-rw-r--r--  tools/perf/util/dso.h    10
-rw-r--r--  tools/perf/util/dsos.c  188
-rw-r--r--  tools/perf/util/dsos.h   21
4 files changed, 177 insertions, 109 deletions
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index ad562743d769..3caca60a6ce3 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -1241,35 +1241,35 @@ struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
return dso;
}
-static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
+static void dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated)
{
- struct rb_root *root = dso->root;
+ struct dsos *dsos = dso->dsos;
if (name == NULL)
return;
- if (dso->long_name_allocated)
- free((char *)dso->long_name);
-
- if (root) {
- rb_erase(&dso->rb_node, root);
+ if (dsos) {
/*
- * __dsos__findnew_link_by_longname_id() isn't guaranteed to
- * add it back, so a clean removal is required here.
+ * Need to avoid re-sorting the dsos breaking by non-atomically
+ * renaming the dso.
*/
- RB_CLEAR_NODE(&dso->rb_node);
- dso->root = NULL;
+ down_write(&dsos->lock);
}
+ if (dso->long_name_allocated)
+ free((char *)dso->long_name);
+
dso->long_name = name;
dso->long_name_len = strlen(name);
dso->long_name_allocated = name_allocated;
- if (root)
- __dsos__findnew_link_by_longname_id(root, dso, NULL, id);
+ if (dsos) {
+ dsos->sorted = false;
+ up_write(&dsos->lock);
+ }
}
-static int __dso_id__cmp(struct dso_id *a, struct dso_id *b)
+static int __dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
{
if (a->maj > b->maj) return -1;
if (a->maj < b->maj) return 1;
@@ -1297,7 +1297,7 @@ static int __dso_id__cmp(struct dso_id *a, struct dso_id *b)
return 0;
}
-bool dso_id__empty(struct dso_id *id)
+bool dso_id__empty(const struct dso_id *id)
{
if (!id)
return true;
@@ -1305,15 +1305,22 @@ bool dso_id__empty(struct dso_id *id)
return !id->maj && !id->min && !id->ino && !id->ino_generation;
}
-void dso__inject_id(struct dso *dso, struct dso_id *id)
+void __dso__inject_id(struct dso *dso, struct dso_id *id)
{
+ struct dsos *dsos = dso->dsos;
+
+ /* dsos write lock held by caller. */
+
dso->id.maj = id->maj;
dso->id.min = id->min;
dso->id.ino = id->ino;
dso->id.ino_generation = id->ino_generation;
+
+ if (dsos)
+ dsos->sorted = false;
}
-int dso_id__cmp(struct dso_id *a, struct dso_id *b)
+int dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
{
/*
* The second is always dso->id, so zeroes if not set, assume passing
@@ -1332,20 +1339,34 @@ int dso__cmp_id(struct dso *a, struct dso *b)
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
- dso__set_long_name_id(dso, name, NULL, name_allocated);
+ dso__set_long_name_id(dso, name, name_allocated);
}
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
+ struct dsos *dsos = dso->dsos;
+
if (name == NULL)
return;
+ if (dsos) {
+ /*
+ * Need to avoid re-sorting the dsos breaking by non-atomically
+ * renaming the dso.
+ */
+ down_write(&dsos->lock);
+ }
if (dso->short_name_allocated)
free((char *)dso->short_name);
dso->short_name = name;
dso->short_name_len = strlen(name);
dso->short_name_allocated = name_allocated;
+
+ if (dsos) {
+ dsos->sorted = false;
+ up_write(&dsos->lock);
+ }
}
int dso__name_len(const struct dso *dso)
@@ -1381,7 +1402,7 @@ struct dso *dso__new_id(const char *name, struct dso_id *id)
strcpy(dso->name, name);
if (id)
dso->id = *id;
- dso__set_long_name_id(dso, dso->name, id, false);
+ dso__set_long_name_id(dso, dso->name, false);
dso__set_short_name(dso, dso->name, false);
dso->symbols = RB_ROOT_CACHED;
dso->symbol_names = NULL;
@@ -1406,9 +1427,6 @@ struct dso *dso__new_id(const char *name, struct dso_id *id)
dso->is_kmod = 0;
dso->needs_swap = DSO_SWAP__UNSET;
dso->comp = COMP_ID__NONE;
- RB_CLEAR_NODE(&dso->rb_node);
- dso->root = NULL;
- INIT_LIST_HEAD(&dso->node);
INIT_LIST_HEAD(&dso->data.open_entry);
mutex_init(&dso->lock);
refcount_set(&dso->refcnt, 1);
@@ -1424,9 +1442,8 @@ struct dso *dso__new(const char *name)
void dso__delete(struct dso *dso)
{
- if (!RB_EMPTY_NODE(&dso->rb_node))
- pr_err("DSO %s is still in rbtree when being deleted!\n",
- dso->long_name);
+ if (dso->dsos)
+ pr_err("DSO %s is still in rbtree when being deleted!\n", dso->long_name);
/* free inlines first, as they reference symbols */
inlines__tree_delete(&dso->inlined_nodes);
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 2c295438226d..b22dec8b3f3a 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -146,9 +146,7 @@ struct auxtrace_cache;
struct dso {
struct mutex lock;
- struct list_head node;
- struct rb_node rb_node; /* rbtree node sorted by long name */
- struct rb_root *root; /* root of rbtree that rb_node is in */
+ struct dsos *dsos;
struct rb_root_cached symbols;
struct symbol **symbol_names;
size_t symbol_names_len;
@@ -238,8 +236,8 @@ static inline void dso__set_loaded(struct dso *dso)
dso->loaded = true;
}
-int dso_id__cmp(struct dso_id *a, struct dso_id *b);
-bool dso_id__empty(struct dso_id *id);
+int dso_id__cmp(const struct dso_id *a, const struct dso_id *b);
+bool dso_id__empty(const struct dso_id *id);
struct dso *dso__new_id(const char *name, struct dso_id *id);
struct dso *dso__new(const char *name);
@@ -248,7 +246,7 @@ void dso__delete(struct dso *dso);
int dso__cmp_id(struct dso *a, struct dso *b);
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated);
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated);
-void dso__inject_id(struct dso *dso, struct dso_id *id);
+void __dso__inject_id(struct dso *dso, struct dso_id *id);
int dso__name_len(const struct dso *dso);
diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c
index b7fbfb877ae3..c01f0b538e65 100644
--- a/tools/perf/util/dsos.c
+++ b/tools/perf/util/dsos.c
@@ -14,24 +14,30 @@
void dsos__init(struct dsos *dsos)
{
- INIT_LIST_HEAD(&dsos->head);
- dsos->root = RB_ROOT;
init_rwsem(&dsos->lock);
+
+ dsos->cnt = 0;
+ dsos->allocated = 0;
+ dsos->dsos = NULL;
+ dsos->sorted = true;
}
static void dsos__purge(struct dsos *dsos)
{
- struct dso *pos, *n;
-
down_write(&dsos->lock);
- list_for_each_entry_safe(pos, n, &dsos->head, node) {
- RB_CLEAR_NODE(&pos->rb_node);
- pos->root = NULL;
- list_del_init(&pos->node);
- dso__put(pos);
+ for (unsigned int i = 0; i < dsos->cnt; i++) {
+ struct dso *dso = dsos->dsos[i];
+
+ dso->dsos = NULL;
+ dso__put(dso);
}
+ zfree(&dsos->dsos);
+ dsos->cnt = 0;
+ dsos->allocated = 0;
+ dsos->sorted = true;
+
up_write(&dsos->lock);
}
@@ -46,9 +52,8 @@ static int __dsos__for_each_dso(struct dsos *dsos,
int (*cb)(struct dso *dso, void *data),
void *data)
{
- struct dso *dso;
-
- list_for_each_entry(dso, &dsos->head, node) {
+ for (unsigned int i = 0; i < dsos->cnt; i++) {
+ struct dso *dso = dsos->dsos[i];
int err;
err = cb(dso, data);
@@ -119,16 +124,47 @@ static int dso__cmp_short_name(struct dso *a, struct dso *b)
return __dso__cmp_short_name(a->short_name, &a->id, b);
}
+static int dsos__cmp_long_name_id_short_name(const void *va, const void *vb)
+{
+ const struct dso *a = *((const struct dso **)va);
+ const struct dso *b = *((const struct dso **)vb);
+ int rc = strcmp(a->long_name, b->long_name);
+
+ if (!rc) {
+ rc = dso_id__cmp(&a->id, &b->id);
+ if (!rc)
+ rc = strcmp(a->short_name, b->short_name);
+ }
+ return rc;
+}
+
/*
* Find a matching entry and/or link current entry to RB tree.
* Either one of the dso or name parameter must be non-NULL or the
* function will not work.
*/
-struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso *dso,
- const char *name, struct dso_id *id)
+struct dso *__dsos__findnew_link_by_longname_id(struct dsos *dsos,
+ struct dso *dso,
+ const char *name,
+ struct dso_id *id,
+ bool write_locked)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
+ int low = 0, high = dsos->cnt - 1;
+
+ if (!dsos->sorted) {
+ if (!write_locked) {
+ up_read(&dsos->lock);
+ down_write(&dsos->lock);
+ dso = __dsos__findnew_link_by_longname_id(dsos, dso, name, id,
+ /*write_locked=*/true);
+ up_write(&dsos->lock);
+ down_read(&dsos->lock);
+ return dso;
+ }
+ qsort(dsos->dsos, dsos->cnt, sizeof(struct dso *),
+ dsos__cmp_long_name_id_short_name);
+ dsos->sorted = true;
+ }
if (!name)
name = dso->long_name;
@@ -136,11 +172,11 @@ struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso
/*
* Find node with the matching name
*/
- while (*p) {
- struct dso *this = rb_entry(*p, struct dso, rb_node);
+ while (low <= high) {
+ int mid = (low + high) / 2;
+ struct dso *this = dsos->dsos[mid];
int rc = __dso__cmp_long_name(name, id, this);
- parent = *p;
if (rc == 0) {
/*
* In case the new DSO is a duplicate of an existing
@@ -161,56 +197,53 @@ struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso
}
}
if (rc < 0)
- p = &parent->rb_left;
+ high = mid - 1;
else
- p = &parent->rb_right;
- }
- if (dso) {
- /* Add new node and rebalance tree */
- rb_link_node(&dso->rb_node, parent, p);
- rb_insert_color(&dso->rb_node, root);
- dso->root = root;
+ low = mid + 1;
}
+ if (dso)
+ __dsos__add(dsos, dso);
return NULL;
}
-void __dsos__add(struct dsos *dsos, struct dso *dso)
+int __dsos__add(struct dsos *dsos, struct dso *dso)
{
- list_add_tail(&dso->node, &dsos->head);
- __dsos__findnew_link_by_longname_id(&dsos->root, dso, NULL, &dso->id);
- /*
- * It is now in the linked list, grab a reference, then garbage collect
- * this when needing memory, by looking at LRU dso instances in the
- * list with atomic_read(&dso->refcnt) == 1, i.e. no references
- * anywhere besides the one for the list, do, under a lock for the
- * list: remove it from the list, then a dso__put(), that probably will
- * be the last and will then call dso__delete(), end of life.
- *
- * That, or at the end of the 'struct machine' lifetime, when all
- * 'struct dso' instances will be removed from the list, in
- * dsos__exit(), if they have no other reference from some other data
- * structure.
- *
- * E.g.: after processing a 'perf.data' file and storing references
- * to objects instantiated while processing events, we will have
- * references to the 'thread', 'map', 'dso' structs all from 'struct
- * hist_entry' instances, but we may not need anything not referenced,
- * so we might as well call machines__exit()/machines__delete() and
- * garbage collect it.
- */
- dso__get(dso);
+ if (dsos->cnt == dsos->allocated) {
+ unsigned int to_allocate = 2;
+ struct dso **temp;
+
+ if (dsos->allocated > 0)
+ to_allocate = dsos->allocated * 2;
+ temp = realloc(dsos->dsos, sizeof(struct dso *) * to_allocate);
+ if (!temp)
+ return -ENOMEM;
+ dsos->dsos = temp;
+ dsos->allocated = to_allocate;
+ }
+ dsos->dsos[dsos->cnt++] = dso__get(dso);
+ if (dsos->cnt >= 2 && dsos->sorted) {
+ dsos->sorted = dsos__cmp_long_name_id_short_name(&dsos->dsos[dsos->cnt - 2],
+ &dsos->dsos[dsos->cnt - 1])
+ <= 0;
+ }
+ dso->dsos = dsos;
+ return 0;
}
-void dsos__add(struct dsos *dsos, struct dso *dso)
+int dsos__add(struct dsos *dsos, struct dso *dso)
{
+ int ret;
+
down_write(&dsos->lock);
- __dsos__add(dsos, dso);
+ ret = __dsos__add(dsos, dso);
up_write(&dsos->lock);
+ return ret;
}
-static struct dso *__dsos__findnew_by_longname_id(struct rb_root *root, const char *name, struct dso_id *id)
+static struct dso *__dsos__findnew_by_longname_id(struct dsos *dsos, const char *name,
+ struct dso_id *id, bool write_locked)
{
- return __dsos__findnew_link_by_longname_id(root, NULL, name, id);
+ return __dsos__findnew_link_by_longname_id(dsos, NULL, name, id, write_locked);
}
struct dsos__find_id_cb_args {
@@ -231,7 +264,8 @@ static int dsos__find_id_cb(struct dso *dso, void *data)
}
-static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, struct dso_id *id, bool cmp_short)
+static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, struct dso_id *id,
+ bool cmp_short, bool write_locked)
{
struct dso *res;
@@ -245,7 +279,7 @@ static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, struct d
__dsos__for_each_dso(dsos, dsos__find_id_cb, &args);
return args.res;
}
- res = __dsos__findnew_by_longname_id(&dsos->root, name, id);
+ res = __dsos__findnew_by_longname_id(dsos, name, id, write_locked);
return res;
}
@@ -254,7 +288,7 @@ struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
struct dso *res;
down_read(&dsos->lock);
- res = __dsos__find_id(dsos, name, NULL, cmp_short);
+ res = __dsos__find_id(dsos, name, NULL, cmp_short, /*write_locked=*/false);
up_read(&dsos->lock);
return res;
}
@@ -296,8 +330,13 @@ static struct dso *__dsos__addnew_id(struct dsos *dsos, const char *name, struct
struct dso *dso = dso__new_id(name, id);
if (dso != NULL) {
- __dsos__add(dsos, dso);
+ /*
+ * The dsos lock is held on entry, so rename the dso before
+ * adding it to avoid needing to take the dsos lock again to say
+ * the array isn't sorted.
+ */
dso__set_basename(dso);
+ __dsos__add(dsos, dso);
}
return dso;
}
@@ -309,10 +348,10 @@ struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
static struct dso *__dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id)
{
- struct dso *dso = __dsos__find_id(dsos, name, id, false);
+ struct dso *dso = __dsos__find_id(dsos, name, id, false, /*write_locked=*/true);
if (dso && dso_id__empty(&dso->id) && !dso_id__empty(id))
- dso__inject_id(dso, id);
+ __dso__inject_id(dso, id);
return dso ? dso : __dsos__addnew_id(dsos, name, id);
}
@@ -403,18 +442,27 @@ struct dso *dsos__findnew_module_dso(struct dsos *dsos,
down_write(&dsos->lock);
- dso = __dsos__find_id(dsos, m->name, NULL, /*cmp_short=*/true);
+ dso = __dsos__find_id(dsos, m->name, NULL, /*cmp_short=*/true, /*write_locked=*/true);
+ if (dso) {
+ up_write(&dsos->lock);
+ return dso;
+ }
+ /*
+ * Failed to find the dso so create it. Change the name before adding it
+ * to the array, to avoid unnecessary sorts and potential locking
+ * issues.
+ */
+ dso = dso__new_id(m->name, /*id=*/NULL);
if (!dso) {
- dso = __dsos__addnew(dsos, m->name);
- if (dso == NULL)
- goto out_unlock;
-
- dso__set_module_info(dso, m, machine);
- dso__set_long_name(dso, strdup(filename), true);
- dso->kernel = DSO_SPACE__KERNEL;
+ up_write(&dsos->lock);
+ return NULL;
}
+ dso__set_basename(dso);
+ dso__set_module_info(dso, m, machine);
+ dso__set_long_name(dso, strdup(filename), true);
+ dso->kernel = DSO_SPACE__KERNEL;
+ __dsos__add(dsos, dso);
-out_unlock:
up_write(&dsos->lock);
return dso;
}
diff --git a/tools/perf/util/dsos.h b/tools/perf/util/dsos.h
index 50bd51523475..c1b3979ad4bd 100644
--- a/tools/perf/util/dsos.h
+++ b/tools/perf/util/dsos.h
@@ -14,20 +14,22 @@ struct kmod_path;
struct machine;
/*
- * DSOs are put into both a list for fast iteration and rbtree for fast
- * long name lookup.
+ * Collection of DSOs as an array for iteration speed, but sorted for O(n)
+ * lookup.
*/
struct dsos {
- struct list_head head;
- struct rb_root root; /* rbtree root sorted by long name */
struct rw_semaphore lock;
+ struct dso **dsos;
+ unsigned int cnt;
+ unsigned int allocated;
+ bool sorted;
};
void dsos__init(struct dsos *dsos);
void dsos__exit(struct dsos *dsos);
-void __dsos__add(struct dsos *dsos, struct dso *dso);
-void dsos__add(struct dsos *dsos, struct dso *dso);
+int __dsos__add(struct dsos *dsos, struct dso *dso);
+int dsos__add(struct dsos *dsos, struct dso *dso);
struct dso *__dsos__addnew(struct dsos *dsos, const char *name);
struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short);
@@ -35,8 +37,11 @@ struct dso *dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id
bool dsos__read_build_ids(struct dsos *dsos, bool with_hits);
-struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso *dso,
- const char *name, struct dso_id *id);
+struct dso *__dsos__findnew_link_by_longname_id(struct dsos *dsos,
+ struct dso *dso,
+ const char *name,
+ struct dso_id *id,
+ bool write_locked);
size_t dsos__fprintf_buildid(struct dsos *dsos, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm);
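
[Editor's note] The trickiest part of __dsos__findnew_link_by_longname_id()
above is the read-to-write lock upgrade taken when the array turns out to
need sorting. Below is a hedged, standalone sketch of just that pattern,
using pthread rwlocks in place of perf's rw_semaphore wrapper and with the
array handling stubbed out; table, table__sort and table__search are
illustrative names, not perf APIs:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct table {
		pthread_rwlock_t lock;
		bool sorted;
		/* backing array elided; see the sketch after the commit message */
	};

	static void table__sort(struct table *t)
	{
		/* qsort of the backing array would run here, under the write lock */
		t->sorted = true;
	}

	static void *table__search(struct table *t, const char *key)
	{
		/* binary search of the (now sorted) backing array would run here */
		(void)t;
		(void)key;
		return NULL;
	}

	/*
	 * Called with t->lock held; write_locked says in which mode. If a sort
	 * is needed but only the read lock is held, upgrade to the write lock,
	 * redo the lookup there, then drop back to the read lock so the caller
	 * still sees the locking state it started with.
	 */
	static void *table__find(struct table *t, const char *key, bool write_locked)
	{
		if (!t->sorted) {
			if (!write_locked) {
				void *res;

				pthread_rwlock_unlock(&t->lock);
				pthread_rwlock_wrlock(&t->lock);
				res = table__find(t, key, /*write_locked=*/true);
				pthread_rwlock_unlock(&t->lock);
				pthread_rwlock_rdlock(&t->lock);
				return res;
			}
			table__sort(t);
		}
		return table__search(t, key);
	}

Dropping the read lock before taking the write lock means another thread may
modify the collection in between, which is why the lookup is redone from
scratch under the write lock rather than resumed where it left off.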