tools/perf/arch/arm64/util/pmu.c
// SPDX-License-Identifier: GPL-2.0

#include <internal/cpumap.h>
#include "../../../util/cpumap.h"
#include "../../../util/pmu.h"
#include <api/fs/fs.h>
#include <math.h>

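/*
 * Find the core (CPU) PMU. Returns NULL if no core PMU is found, or if
 * the core PMU's cpumap does not cover every possible CPU, as happens
 * on heterogeneous systems with more than one core PMU.
 */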
static struct perf_pmu *pmu__find_core_pmu(void)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!is_pmu_core(pmu->name))
			continue;

		/*
		 * The cpumap should cover all CPUs. Otherwise, some CPUs may
		 * not support some events or have different event IDs.
		 */
		if (RC_CHK_ACCESS(pmu->cpus)->nr != cpu__max_cpu().cpu)
			return NULL;

		return pmu;
	}
	return NULL;
}

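/*
 * Return the metrics table associated with the core PMU, or NULL if no
 * suitable core PMU is found.
 */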
const struct pmu_metrics_table *pmu_metrics_table__find(void)
{
	struct perf_pmu *pmu = pmu__find_core_pmu();

	if (pmu)
		return perf_pmu__find_metrics_table(pmu);

	return NULL;
}

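/*
 * Return the events table associated with the core PMU, or NULL if no
 * suitable core PMU is found.
 */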
const struct pmu_events_table *pmu_events_table__find(void)
{
	struct perf_pmu *pmu = pmu__find_core_pmu();

	if (pmu)
		return perf_pmu__find_events_table(pmu);

	return NULL;
}

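/*
 * Read the number of slots per cycle that the core PMU advertises via
 * its sysfs "caps/slots" attribute. Returns NAN when no core PMU is
 * found or the attribute is absent or reads as zero.
 */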
double perf_pmu__cpu_slots_per_cycle(void)
{
	char path[PATH_MAX];
	unsigned long long slots = 0;
	struct perf_pmu *pmu = pmu__find_core_pmu();

	if (pmu) {
		perf_pmu__pathname_scnprintf(path, sizeof(path),
					     pmu->name, "caps/slots");
		/*
		 * The slots value fits in 32 bits, but sysfs__read_int can't
		 * parse a value with a 0x prefix, so use sysfs__read_ull
		 * instead.
		 */
		sysfs__read_ull(path, &slots);
	}

	return slots ? (double)slots : NAN;
}