path: root/include/linux/alloc_tag.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * allocation tagging
 */
#ifndef _LINUX_ALLOC_TAG_H
#define _LINUX_ALLOC_TAG_H

#include <linux/bug.h>
#include <linux/codetag.h>
#include <linux/container_of.h>
#include <linux/preempt.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/static_key.h>
#include <linux/irqflags.h>

struct alloc_tag_counters {
	u64 bytes;
	u64 calls;
};

/*
 * An instance of this structure is created in a special ELF section at every
 * allocation callsite. At runtime, the special section is treated as
 * an array of these. The embedded codetag utilizes the codetag framework.
 */
struct alloc_tag {
	struct codetag			ct;
	struct alloc_tag_counters __percpu	*counters;
} __aligned(8);
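
/*
 * Illustrative sketch only (the real iteration goes through the codetag
 * framework, not open-coded section walking): because every
 * DEFINE_ALLOC_TAG() instance lands in the "alloc_tags" section, the
 * section can conceptually be treated as a plain array bounded by
 * hypothetical linker-provided symbols:
 *
 *	extern struct alloc_tag __start_alloc_tags[];	// assumed symbol
 *	extern struct alloc_tag __stop_alloc_tags[];	// assumed symbol
 *
 *	for (struct alloc_tag *tag = __start_alloc_tags;
 *	     tag < __stop_alloc_tags; tag++)
 *		pr_info("%s:%u\n", tag->ct.filename, tag->ct.lineno);
 */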

#ifdef CONFIG_MEM_ALLOC_PROFILING

struct codetag_bytes {
	struct codetag *ct;
	s64 bytes;
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);

static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
{
	return container_of(ct, struct alloc_tag, ct);
}

#ifdef ARCH_NEEDS_WEAK_PER_CPU
/*
 * When percpu variables are required to be defined as weak, static percpu
 * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
 * Instead we will account all module allocations to a single counter.
 */
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section("alloc_tags") = {						\
		.ct = CODE_TAG_INIT,						\
		.counters = &_shared_alloc_tag };

#else /* ARCH_NEEDS_WEAK_PER_CPU */

#define DEFINE_ALLOC_TAG(_alloc_tag)						\
	static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr);	\
	static struct alloc_tag _alloc_tag __used __aligned(8)			\
	__section("alloc_tags") = {						\
		.ct = CODE_TAG_INIT,						\
		.counters = &_alloc_tag_cntr };

#endif /* ARCH_NEEDS_WEAK_PER_CPU */

DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
			mem_alloc_profiling_key);

static inline bool mem_alloc_profiling_enabled(void)
{
	return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
				   &mem_alloc_profiling_key);
}
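
/*
 * Hot paths are expected to gate accounting behind this static branch so
 * that the cost is near zero when profiling is disabled. A minimal sketch
 * with a hypothetical helper:
 *
 *	static inline void my_account_alloc(union codetag_ref *ref,
 *					    struct alloc_tag *tag, size_t bytes)
 *	{
 *		if (!mem_alloc_profiling_enabled())
 *			return;
 *		alloc_tag_add(ref, tag, bytes);
 *	}
 */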

static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
{
	struct alloc_tag_counters v = { 0, 0 };
	struct alloc_tag_counters *counter;
	int cpu;

	for_each_possible_cpu(cpu) {
		counter = per_cpu_ptr(tag->counters, cpu);
		v.bytes += counter->bytes;
		v.calls += counter->calls;
	}

	return v;
}
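
/*
 * Example reader (illustrative only; the real reporting lives in the
 * allocation profiling code, not in this header): alloc_tag_read() sums
 * the per-CPU counters, so a hypothetical dumper could print per-callsite
 * totals like this:
 *
 *	static void my_report_tag(struct alloc_tag *tag)
 *	{
 *		struct alloc_tag_counters counter = alloc_tag_read(tag);
 *
 *		pr_info("%s:%u  %llu bytes in %llu calls\n",
 *			tag->ct.filename, tag->ct.lineno,
 *			counter.bytes, counter.calls);
 *	}
 */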

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
	WARN_ONCE(ref && ref->ct,
		  "alloc_tag was not cleared (got tag for %s:%u)\n",
		  ref->ct->filename, ref->ct->lineno);

	WARN_ONCE(!tag, "current->alloc_tag not set");
}

static inline void alloc_tag_sub_check(union codetag_ref *ref)
{
	WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
}
#else
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif

/* Caller should verify both ref and tag to be valid */
static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	ref->ct = &tag->ct;
	/*
	 * We need to increment the call counter every time we have a new
	 * allocation or when we split a large allocation into smaller ones.
	 * Each new reference for every sub-allocation needs to increment the
	 * call counter because when we free each part the counter will be
	 * decremented.
	 */
	this_cpu_inc(tag->counters->calls);
}
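
/*
 * Sketch of the "split" case described above (hypothetical refs array; the
 * real splitting helpers live outside this header): when one accounted
 * allocation is divided into nr_parts pieces, the first piece keeps the
 * original reference and every additional piece gets its own reference to
 * the same tag, so the call counter balances out when each piece is freed
 * individually:
 *
 *	for (i = 1; i < nr_parts; i++)
 *		__alloc_tag_ref_set(&refs[i], tag);
 */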

static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return;

	__alloc_tag_ref_set(ref, tag);
}

static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
	alloc_tag_add_check(ref, tag);
	if (!ref || !tag)
		return;

	__alloc_tag_ref_set(ref, tag);
	this_cpu_add(tag->counters->bytes, bytes);
}

static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
{
	struct alloc_tag *tag;

	alloc_tag_sub_check(ref);
	if (!ref || !ref->ct)
		return;

	tag = ct_to_alloc_tag(ref->ct);

	this_cpu_sub(tag->counters->bytes, bytes);
	this_cpu_dec(tag->counters->calls);

	ref->ct = NULL;
}
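
/*
 * Illustrative add/sub pairing (hypothetical caller; real allocators keep
 * the reference in their own metadata next to the allocated object):
 *
 *	union codetag_ref ref = { .ct = NULL };
 *
 *	// at allocation time: record the callsite and account "size" bytes
 *	alloc_tag_add(&ref, current->alloc_tag, size);
 *	...
 *	// at free time: undo the accounting and clear ref->ct
 *	alloc_tag_sub(&ref, size);
 */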

#define alloc_tag_record(p)	((p) = current->alloc_tag)

#else /* CONFIG_MEM_ALLOC_PROFILING */

#define DEFINE_ALLOC_TAG(_alloc_tag)
static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
				 size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_tag_record(p)	do {} while (0)

#endif /* CONFIG_MEM_ALLOC_PROFILING */

#define alloc_hooks_tag(_tag, _do_alloc)				\
({									\
	struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag);	\
	typeof(_do_alloc) _res = _do_alloc;				\
	alloc_tag_restore(_tag, _old);					\
	_res;								\
})

#define alloc_hooks(_do_alloc)						\
({									\
	DEFINE_ALLOC_TAG(_alloc_tag);					\
	alloc_hooks_tag(&_alloc_tag, _do_alloc);			\
})
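
/*
 * Typical use: an allocator exposes a "_noprof" implementation and wraps it
 * with alloc_hooks() so every callsite of the public name gets its own tag.
 * Illustrative sketch with hypothetical names (see the allocator headers
 * for the real wrappers):
 *
 *	#define my_alloc(size, flags)	alloc_hooks(my_alloc_noprof(size, flags))
 *
 * alloc_hooks_tag() saves current->alloc_tag via alloc_tag_save(), runs the
 * allocation with the callsite's tag installed, then restores the previous
 * tag with alloc_tag_restore().
 */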

#endif /* _LINUX_ALLOC_TAG_H */