Diffstat (limited to 'tools/perf/pmu-events/jevents.py')
-rwxr-xr-x  tools/perf/pmu-events/jevents.py | 359
1 file changed, 296 insertions(+), 63 deletions(-)
diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
index 4c398e0eeb2f..2bcd07ce609f 100755
--- a/tools/perf/pmu-events/jevents.py
+++ b/tools/perf/pmu-events/jevents.py
@@ -3,6 +3,7 @@
"""Convert directories of JSON events to C code."""
import argparse
import csv
+from functools import lru_cache
import json
import metric
import os
@@ -12,30 +13,47 @@ import collections
# Global command line arguments.
_args = None
+# List of regular event tables.
+_event_tables = []
# List of event tables generated from "/sys" directories.
_sys_event_tables = []
+# List of regular metric tables.
+_metric_tables = []
+# List of metric tables generated from "/sys" directories.
+_sys_metric_tables = []
+# Mapping between sys event table names and sys metric table names.
+_sys_event_table_to_metric_table_mapping = {}
# Map from an event name to an architecture standard
# JsonEvent. Architecture standard events are in json files in the top
# f'{_args.starting_dir}/{_args.arch}' directory.
_arch_std_events = {}
-# Track whether an events table is currently being defined and needs closing.
-_close_table = False
# Events to write out when the table is closed
_pending_events = []
+# Name of events table to be written out
+_pending_events_tblname = None
+# Metrics to write out when the table is closed
+_pending_metrics = []
+# Name of metrics table to be written out
+_pending_metrics_tblname = None
# Global BigCString shared by all structures.
_bcs = None
# Order specific JsonEvent attributes will be visited.
_json_event_attributes = [
# cmp_sevent related attributes.
- 'name', 'pmu', 'topic', 'desc', 'metric_name', 'metric_group',
+ 'name', 'pmu', 'topic', 'desc',
# Seems useful, put it early.
'event',
# Short things in alphabetical order.
'aggr_mode', 'compat', 'deprecated', 'perpkg', 'unit',
# Longer things (the last won't be iterated over during decompress).
- 'metric_constraint', 'metric_expr', 'long_desc'
+ 'long_desc'
]
+# Attributes that are in pmu_metric rather than pmu_event.
+_json_metric_attributes = [
+ 'metric_name', 'metric_group', 'metric_constraint', 'metric_expr', 'desc',
+ 'long_desc', 'unit', 'compat', 'aggr_mode'
+]
def removesuffix(s: str, suffix: str) -> str:
"""Remove the suffix from a string
@@ -46,14 +64,16 @@ def removesuffix(s: str, suffix: str) -> str:
return s[0:-len(suffix)] if s.endswith(suffix) else s
-def file_name_to_table_name(parents: Sequence[str], dirname: str) -> str:
+def file_name_to_table_name(prefix: str, parents: Sequence[str],
+ dirname: str) -> str:
"""Generate a C table name from directory names."""
- tblname = 'pme'
+ tblname = prefix
for p in parents:
tblname += '_' + p
tblname += '_' + dirname
return tblname.replace('-', '_')
+
def c_len(s: str) -> int:
"""Return the length of s a C string
@@ -271,7 +291,7 @@ class JsonEvent:
self.metric_constraint = jd.get('MetricConstraint')
self.metric_expr = None
if 'MetricExpr' in jd:
- self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
+ self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
arch_std = jd.get('ArchStdEvent')
if precise and self.desc and '(Precise Event)' not in self.desc:
@@ -320,35 +340,46 @@ class JsonEvent:
s += f'\t{attr} = {value},\n'
return s + '}'
- def build_c_string(self) -> str:
+ def build_c_string(self, metric: bool) -> str:
s = ''
- for attr in _json_event_attributes:
+ for attr in _json_metric_attributes if metric else _json_event_attributes:
x = getattr(self, attr)
- if x and attr == 'metric_expr':
+ if metric and x and attr == 'metric_expr':
# Convert parsed metric expressions into a string. Slashes
# must be doubled in the file.
x = x.ToPerfJson().replace('\\', '\\\\')
s += f'{x}\\000' if x else '\\000'
return s
- def to_c_string(self) -> str:
+ def to_c_string(self, metric: bool) -> str:
"""Representation of the event as a C struct initializer."""
- s = self.build_c_string()
+ s = self.build_c_string(metric)
return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'
+@lru_cache(maxsize=None)
def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
"""Read json events from the specified file."""
-
try:
- result = json.load(open(path), object_hook=JsonEvent)
+ events = json.load(open(path), object_hook=JsonEvent)
except BaseException as err:
print(f"Exception processing {path}")
raise
- for event in result:
+ metrics: list[Tuple[str, metric.Expression]] = []
+ for event in events:
event.topic = topic
- return result
+ if event.metric_name and '-' not in event.metric_name:
+ metrics.append((event.metric_name, event.metric_expr))
+ updates = metric.RewriteMetricsInTermsOfOthers(metrics)
+ if updates:
+ for event in events:
+ if event.metric_name in updates:
+ # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
+ # f'to\n"{updates[event.metric_name]}"')
+ event.metric_expr = updates[event.metric_name]
+
+ return events
def preprocess_arch_std_files(archpath: str) -> None:
"""Read in all architecture standard events."""
@@ -358,26 +389,20 @@ def preprocess_arch_std_files(archpath: str) -> None:
for event in read_json_events(item.path, topic=''):
if event.name:
_arch_std_events[event.name.lower()] = event
-
-
-def print_events_table_prefix(tblname: str) -> None:
- """Called when a new events table is started."""
- global _close_table
- if _close_table:
- raise IOError('Printing table prefix but last table has no suffix')
- _args.output_file.write(f'static const struct compact_pmu_event {tblname}[] = {{\n')
- _close_table = True
+ if event.metric_name:
+ _arch_std_events[event.metric_name.lower()] = event
def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
"""Add contents of file to _pending_events table."""
- if not _close_table:
- raise IOError('Table entries missing prefix')
for e in read_json_events(item.path, topic):
- _pending_events.append(e)
+ if e.name:
+ _pending_events.append(e)
+ if e.metric_name:
+ _pending_metrics.append(e)
-def print_events_table_suffix() -> None:
+def print_pending_events() -> None:
"""Optionally close events table."""
def event_cmp_key(j: JsonEvent) -> Tuple[bool, str, str, str, str]:
@@ -389,17 +414,58 @@ def print_events_table_suffix() -> None:
return (j.desc is not None, fix_none(j.topic), fix_none(j.name), fix_none(j.pmu),
fix_none(j.metric_name))
- global _close_table
- if not _close_table:
+ global _pending_events
+ if not _pending_events:
return
- global _pending_events
+ global _pending_events_tblname
+ if _pending_events_tblname.endswith('_sys'):
+ global _sys_event_tables
+ _sys_event_tables.append(_pending_events_tblname)
+ else:
+ global _event_tables
+ _event_tables.append(_pending_events_tblname)
+
+ _args.output_file.write(
+ f'static const struct compact_pmu_event {_pending_events_tblname}[] = {{\n')
+
for event in sorted(_pending_events, key=event_cmp_key):
- _args.output_file.write(event.to_c_string())
- _pending_events = []
+ _args.output_file.write(event.to_c_string(metric=False))
+ _pending_events = []
+
+ _args.output_file.write('};\n\n')
+
+def print_pending_metrics() -> None:
+ """Optionally close metrics table."""
+
+ def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
+ def fix_none(s: Optional[str]) -> str:
+ if s is None:
+ return ''
+ return s
+
+ return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))
+
+ global _pending_metrics
+ if not _pending_metrics:
+ return
+
+ global _pending_metrics_tblname
+ if _pending_metrics_tblname.endswith('_sys'):
+ global _sys_metric_tables
+ _sys_metric_tables.append(_pending_metrics_tblname)
+ else:
+ global _metric_tables
+ _metric_tables.append(_pending_metrics_tblname)
+
+ _args.output_file.write(
+ f'static const struct compact_pmu_event {_pending_metrics_tblname}[] = {{\n')
+
+ for metric in sorted(_pending_metrics, key=metric_cmp_key):
+ _args.output_file.write(metric.to_c_string(metric=True))
+ _pending_metrics = []
_args.output_file.write('};\n\n')
- _close_table = False
def get_topic(topic: str) -> str:
if topic.endswith('metrics.json'):
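The two writers above each emit one self-contained compact table per model. An illustrative sketch of the output's shape (offsets and field contents invented):

# static const struct compact_pmu_event pmu_events__test_soc_cpu[] = {
# { 0 },   /* name\000pmu\000topic\000desc\000event\000...\000 */
# ...
# };
#
# static const struct compact_pmu_event pmu_metrics__test_soc_cpu[] = {
# { 512 }, /* metric_name\000metric_group\000...\000 */
# ...
# };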
@@ -423,12 +489,13 @@ def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
topic = get_topic(item.name)
for event in read_json_events(item.path, topic):
- _bcs.add(event.build_c_string())
+ if event.name:
+ _bcs.add(event.build_c_string(metric=False))
+ if event.metric_name:
+ _bcs.add(event.build_c_string(metric=True))
def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
"""Process a JSON file during the main walk."""
- global _sys_event_tables
-
def is_leaf_dir(path: str) -> bool:
for item in os.scandir(path):
if item.is_dir():
@@ -437,12 +504,16 @@ def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
# model directory, reset topic
if item.is_dir() and is_leaf_dir(item.path):
- print_events_table_suffix()
+ print_pending_events()
+ print_pending_metrics()
+
+ global _pending_events_tblname
+ _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
+ global _pending_metrics_tblname
+ _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)
- tblname = file_name_to_table_name(parents, item.name)
if item.name == 'sys':
- _sys_event_tables.append(tblname)
- print_events_table_prefix(tblname)
+ _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
return
# base dir or too deep
@@ -467,6 +538,12 @@ struct pmu_events_table {
size_t length;
};
+/* Struct used to make the PMU metric table implementation opaque to callers. */
+struct pmu_metrics_table {
+ const struct compact_pmu_event *entries;
+ size_t length;
+};
+
/*
* Map a CPU to its table of PMU events. The CPU is identified by the
* cpuid field, which is an arch-specific identifier for the CPU.
@@ -478,7 +555,8 @@ struct pmu_events_table {
struct pmu_events_map {
const char *arch;
const char *cpuid;
- struct pmu_events_table table;
+ struct pmu_events_table event_table;
+ struct pmu_metrics_table metric_table;
};
/*
@@ -492,9 +570,13 @@ const struct pmu_events_map pmu_events_map[] = {
_args.output_file.write("""{
\t.arch = "testarch",
\t.cpuid = "testcpu",
-\t.table = {
-\t.entries = pme_test_soc_cpu,
-\t.length = ARRAY_SIZE(pme_test_soc_cpu),
+\t.event_table = {
+\t\t.entries = pmu_events__test_soc_cpu,
+\t\t.length = ARRAY_SIZE(pmu_events__test_soc_cpu),
+\t},
+\t.metric_table = {
+\t\t.entries = pmu_metrics__test_soc_cpu,
+\t\t.length = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
\t}
},
""")
@@ -505,14 +587,31 @@ const struct pmu_events_map pmu_events_map[] = {
for row in table:
# Skip the first row or any row beginning with #.
if not first and len(row) > 0 and not row[0].startswith('#'):
- tblname = file_name_to_table_name([], row[2].replace('/', '_'))
+ event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
+ if event_tblname in _event_tables:
+ event_size = f'ARRAY_SIZE({event_tblname})'
+ else:
+ event_tblname = 'NULL'
+ event_size = '0'
+ metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
+ if metric_tblname in _metric_tables:
+ metric_size = f'ARRAY_SIZE({metric_tblname})'
+ else:
+ metric_tblname = 'NULL'
+ metric_size = '0'
+ if event_size == '0' and metric_size == '0':
+ continue
cpuid = row[0].replace('\\', '\\\\')
_args.output_file.write(f"""{{
\t.arch = "{arch}",
\t.cpuid = "{cpuid}",
-\t.table = {{
-\t\t.entries = {tblname},
-\t\t.length = ARRAY_SIZE({tblname})
+\t.event_table = {{
+\t\t.entries = {event_tblname},
+\t\t.length = {event_size}
+\t}},
+\t.metric_table = {{
+\t\t.entries = {metric_tblname},
+\t\t.length = {metric_size}
\t}}
}},
""")
@@ -521,7 +620,8 @@ const struct pmu_events_map pmu_events_map[] = {
_args.output_file.write("""{
\t.arch = 0,
\t.cpuid = 0,
-\t.table = { 0, 0 },
+\t.event_table = { 0, 0 },
+\t.metric_table = { 0, 0 },
}
};
""")
@@ -532,14 +632,36 @@ def print_system_mapping_table() -> None:
_args.output_file.write("""
struct pmu_sys_events {
\tconst char *name;
-\tstruct pmu_events_table table;
+\tstruct pmu_events_table event_table;
+\tstruct pmu_metrics_table metric_table;
};
static const struct pmu_sys_events pmu_sys_event_tables[] = {
""")
+ printed_metric_tables = []
for tblname in _sys_event_tables:
_args.output_file.write(f"""\t{{
-\t\t.table = {{
+\t\t.event_table = {{
+\t\t\t.entries = {tblname},
+\t\t\t.length = ARRAY_SIZE({tblname})
+\t\t}},""")
+ metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
+ if metric_tblname in _sys_metric_tables:
+ _args.output_file.write(f"""
+\t\t.metric_table = {{
+\t\t\t.entries = {metric_tblname},
+\t\t\t.length = ARRAY_SIZE({metric_tblname})
+\t\t}},""")
+ printed_metric_tables.append(metric_tblname)
+ _args.output_file.write(f"""
+\t\t.name = \"{tblname}\",
+\t}},
+""")
+ for tblname in _sys_metric_tables:
+ if tblname in printed_metric_tables:
+ continue
+ _args.output_file.write(f"""\t{{
+\t\t.metric_table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
\t\t}},
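Because a sys directory can yield both an event and a metric table, _sys_event_table_to_metric_table_mapping pairs them so one pmu_sys_event_tables[] entry carries both; metric-only tables are emitted afterwards via printed_metric_tables. A hypothetical illustration of the bookkeeping (table names assumed here for clarity):

_sys_event_tables = ['pmu_events__test_soc_sys']
_sys_metric_tables = ['pmu_metrics__test_soc_sys']
_sys_event_table_to_metric_table_mapping = {
    'pmu_events__test_soc_sys': 'pmu_metrics__test_soc_sys',
}
# print_system_mapping_table() then emits a single entry whose .event_table and
# .metric_table reference these two arrays.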
@@ -547,11 +669,12 @@ static const struct pmu_sys_events pmu_sys_event_tables[] = {
\t}},
""")
_args.output_file.write("""\t{
-\t\t.table = { 0, 0 }
+\t\t.event_table = { 0, 0 },
+\t\t.metric_table = { 0, 0 },
\t},
};
-static void decompress(int offset, struct pmu_event *pe)
+static void decompress_event(int offset, struct pmu_event *pe)
{
\tconst char *p = &big_c_string[offset];
""")
@@ -564,6 +687,19 @@ static void decompress(int offset, struct pmu_event *pe)
_args.output_file.write('\twhile (*p++);')
_args.output_file.write("""}
+static void decompress_metric(int offset, struct pmu_metric *pm)
+{
+\tconst char *p = &big_c_string[offset];
+""")
+ for attr in _json_metric_attributes:
+ _args.output_file.write(f"""
+\tpm->{attr} = (*p == '\\0' ? NULL : p);
+""")
+ if attr == _json_metric_attributes[-1]:
+ continue
+ _args.output_file.write('\twhile (*p++);')
+ _args.output_file.write("""}
+
int pmu_events_table_for_each_event(const struct pmu_events_table *table,
pmu_event_iter_fn fn,
void *data)
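A minimal Python mirror of what the emitted decompress_metric() does, assuming the layout written by build_c_string(metric=True); illustration only, the real decoding happens in the generated C:

def decompress_metric_py(big_c_string: str, offset: int) -> dict:
    """Split out one NUL-terminated field per _json_metric_attributes entry."""
    out = {}
    pos = offset
    for attr in _json_metric_attributes:
        end = big_c_string.index('\0', pos)
        out[attr] = big_c_string[pos:end] or None   # empty field maps to NULL/None
        pos = end + 1
    return out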
@@ -572,7 +708,9 @@ int pmu_events_table_for_each_event(const struct pmu_events_table *table,
struct pmu_event pe;
int ret;
- decompress(table->entries[i].offset, &pe);
+ decompress_event(table->entries[i].offset, &pe);
+ if (!pe.name)
+ continue;
ret = fn(&pe, table, data);
if (ret)
return ret;
@@ -580,7 +718,25 @@ int pmu_events_table_for_each_event(const struct pmu_events_table *table,
return 0;
}
-const struct pmu_events_table *perf_pmu__find_table(struct perf_pmu *pmu)
+int pmu_metrics_table_for_each_metric(const struct pmu_metrics_table *table,
+ pmu_metric_iter_fn fn,
+ void *data)
+{
+ for (size_t i = 0; i < table->length; i++) {
+ struct pmu_metric pm;
+ int ret;
+
+ decompress_metric(table->entries[i].offset, &pm);
+ if (!pm.metric_expr)
+ continue;
+ ret = fn(&pm, table, data);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
{
const struct pmu_events_table *table = NULL;
char *cpuid = perf_pmu__getcpuid(pmu);
@@ -599,7 +755,34 @@ const struct pmu_events_table *perf_pmu__find_table(struct perf_pmu *pmu)
break;
if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
- table = &map->table;
+ table = &map->event_table;
+ break;
+ }
+ }
+ free(cpuid);
+ return table;
+}
+
+const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
+{
+ const struct pmu_metrics_table *table = NULL;
+ char *cpuid = perf_pmu__getcpuid(pmu);
+ int i;
+
+ /* on some platforms which use cpus map, cpuid can be NULL for
+ * PMUs other than CORE PMUs.
+ */
+ if (!cpuid)
+ return NULL;
+
+ i = 0;
+ for (;;) {
+ const struct pmu_events_map *map = &pmu_events_map[i++];
+ if (!map->arch)
+ break;
+
+ if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
+ table = &map->metric_table;
break;
}
}
@@ -613,7 +796,18 @@ const struct pmu_events_table *find_core_events_table(const char *arch, const ch
tables->arch;
tables++) {
if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
- return &tables->table;
+ return &tables->event_table;
+ }
+ return NULL;
+}
+
+const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
+{
+ for (const struct pmu_events_map *tables = &pmu_events_map[0];
+ tables->arch;
+ tables++) {
+ if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
+ return &tables->metric_table;
}
return NULL;
}
@@ -623,7 +817,20 @@ int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
for (const struct pmu_events_map *tables = &pmu_events_map[0];
tables->arch;
tables++) {
- int ret = pmu_events_table_for_each_event(&tables->table, fn, data);
+ int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
+{
+ for (const struct pmu_events_map *tables = &pmu_events_map[0];
+ tables->arch;
+ tables++) {
+ int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);
if (ret)
return ret;
@@ -637,7 +844,7 @@ const struct pmu_events_table *find_sys_events_table(const char *name)
tables->name;
tables++) {
if (!strcmp(tables->name, name))
- return &tables->table;
+ return &tables->event_table;
}
return NULL;
}
@@ -647,7 +854,20 @@ int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
tables->name;
tables++) {
- int ret = pmu_events_table_for_each_event(&tables->table, fn, data);
+ int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);
+
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
+{
+ for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
+ tables->name;
+ tables++) {
+ int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);
if (ret)
return ret;
@@ -670,12 +890,24 @@ def main() -> None:
action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
"""Replicate the directory/file walking behavior of C's file tree walk."""
for item in os.scandir(path):
+ if _args.model != 'all' and item.is_dir():
+ # Check if the model matches one in _args.model.
+ if len(parents) == _args.model.split(',')[0].count('/'):
+ # We're testing the correct directory.
+ item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
+ if 'test' not in item_path and item_path not in _args.model.split(','):
+ continue
action(parents, item)
if item.is_dir():
ftw(item.path, parents + [item.name], action)
ap = argparse.ArgumentParser()
ap.add_argument('arch', help='Architecture name like x86')
+ ap.add_argument('model', help='''Select a model such as skylake to
+reduce the code size. Normally set to "all". For architectures like
+ARM64 with an implementor/model, the model must include the implementor
+such as "arm/cortex-a34".''',
+ default='all')
ap.add_argument(
'starting_dir',
type=dir_path,
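A rough example of how the new positional 'model' argument changes invocation (paths are illustrative; the real call sites are the perf Makefiles). Paths containing 'test' are always kept, so the pmu-events self tests still build:

# Keep every model (today's behaviour):
#   ./jevents.py x86 all tools/perf/pmu-events/arch pmu-events.c
# Prune everything but one ARM64 model plus the test SoCs:
#   ./jevents.py arm64 arm/cortex-a34 tools/perf/pmu-events/arch pmu-events.c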
@@ -721,7 +953,8 @@ struct compact_pmu_event {
for arch in archs:
arch_path = f'{_args.starting_dir}/{arch}'
ftw(arch_path, [], process_one_file)
- print_events_table_suffix()
+ print_pending_events()
+ print_pending_metrics()
print_mapping_table(archs)
print_system_mapping_table()