path: root/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "get_branch_snapshot.skel.h"

static int *pfd_array;
static int cpu_cnt;

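/* Return true when /proc/cpuinfo reports the "hypervisor" CPU flag, i.e.
 * we are running in a guest, where LBR snapshots are not expected to work.
 */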
static bool is_hypervisor(void)
{
	char *line = NULL;
	bool ret = false;
	size_t len = 0;
	FILE *fp;

	fp = fopen("/proc/cpuinfo", "r");
	if (!fp)
		return false;

	while (getline(&line, &len, fp) != -1) {
		if (!strncmp(line, "flags", 5)) {
			if (strstr(line, "hypervisor") != NULL)
				ret = true;
			break;
		}
	}

	free(line);
	fclose(fp);
	return ret;
}

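/* Open one hardware cycles event per possible CPU with branch stack
 * sampling requested; this is what makes the kernel enable LBR so that
 * bpf_get_branch_snapshot() has entries to read. Returns non-zero only
 * when allocation fails or not even CPU 0 accepts the event, i.e. LBR
 * is unsupported.
 */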
static int create_perf_events(void)
{
	struct perf_event_attr attr = {0};
	int cpu;

	/* create a hardware cycles event that samples the branch stack */
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL |
		PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;

	cpu_cnt = libbpf_num_possible_cpus();
	pfd_array = malloc(sizeof(int) * cpu_cnt);
	if (!pfd_array) {
		cpu_cnt = 0;
		return 1;
	}

	for (cpu = 0; cpu < cpu_cnt; cpu++) {
		pfd_array[cpu] = syscall(__NR_perf_event_open, &attr,
					 -1, cpu, -1, PERF_FLAG_FD_CLOEXEC);
		if (pfd_array[cpu] < 0)
			break;
	}

	return cpu == 0;
}

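/* Close every fd that create_perf_events() managed to open; stop at the
 * first negative fd, since nothing past it was opened.
 */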
static void close_perf_events(void)
{
	int cpu, fd;

	for (cpu = 0; cpu < cpu_cnt; cpu++) {
		fd = pfd_array[cpu];
		if (fd < 0)
			break;
		close(fd);
	}
	free(pfd_array);
}

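/* The serial_ prefix makes test_progs run this test alone rather than in
 * parallel with others, presumably because LBR is shared per-CPU hardware
 * state that concurrent tests could clobber.
 */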
void serial_test_get_branch_snapshot(void)
{
	struct get_branch_snapshot *skel = NULL;
	int err;

	/* Skip the test until LBR snapshot is fixed for hypervisors. */
	if (is_hypervisor()) {
		test__skip();
		return;
	}

	if (create_perf_events()) {
		test__skip();  /* system doesn't support LBR */
		goto cleanup;
	}

	skel = get_branch_snapshot__open_and_load();
	if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load"))
		goto cleanup;

	err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low);
	if (!ASSERT_OK(err, "kallsyms_find"))
		goto cleanup;

	/* We only know the start address of bpf_testmod_loop_test; module
	 * functions can appear in /proc/kallsyms in any order, so guess an
	 * upper bound for its end instead of using the next symbol.
	 */
	skel->bss->address_high = skel->bss->address_low + 128;

	err = get_branch_snapshot__attach(skel);
	if (!ASSERT_OK(err, "get_branch_snapshot__attach"))
		goto cleanup;

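	/* Reading from the bpf_testmod file ends up calling
	 * bpf_testmod_loop_test() in the kernel module, which fires the
	 * attached BPF program and takes the LBR snapshot.
	 */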
	trigger_module_test_read(100);

	if (skel->bss->total_entries < 16) {
		/* too few entries for the hit/waste test */
		test__skip();
		goto cleanup;
	}

	ASSERT_GT(skel->bss->test1_hits, 6, "find_looptest_in_lbr");

	/* Since we stop LBR in software, a few entries are inevitably
	 * wasted on branches taken between the traced function's return
	 * and the snapshot, but we should waste as few entries as
	 * possible. We currently see about 7 on x86_64 systems.
	 * Check for < 10 so that we get a heads-up when a change starts
	 * wasting too many entries.
	 */
	ASSERT_LT(skel->bss->wasted_entries, 10, "check_wasted_entries");

cleanup:
	get_branch_snapshot__destroy(skel);
	close_perf_events();
}