path: root/mm/kasan/tags.c
blob: dd929ab166fb4d46bc61d51ea1620856e5c40eba
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common tag-based KASAN code.
 *
 * Copyright (c) 2018 Google, Inc.
 * Copyright (c) 2020 Google, Inc.
 */

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/static_key.h>
#include <linux/string.h>
#include <linux/types.h>

#include "kasan.h"
#include "../slab.h"

enum kasan_arg_stacktrace {
	KASAN_ARG_STACKTRACE_DEFAULT,
	KASAN_ARG_STACKTRACE_OFF,
	KASAN_ARG_STACKTRACE_ON,
};

static enum kasan_arg_stacktrace kasan_arg_stacktrace __initdata;

/* Whether to collect alloc/free stack traces. */
DEFINE_STATIC_KEY_TRUE(kasan_flag_stacktrace);

/* Non-zero, as initial pointer values are 0. */
#define STACK_RING_BUSY_PTR ((void *)1)

struct kasan_stack_ring stack_ring = {
	.lock = __RW_LOCK_UNLOCKED(stack_ring.lock)
};
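
/*
 * The ring itself is declared in mm/kasan/kasan.h. As a rough sketch (see
 * that header for the authoritative definition), it pairs a monotonically
 * increasing position counter with a fixed array of entries:
 *
 *	struct kasan_stack_ring_entry {
 *		void *ptr;
 *		size_t size;
 *		u32 pid;
 *		depot_stack_handle_t stack;
 *		bool is_free;
 *	};
 *
 *	struct kasan_stack_ring {
 *		rwlock_t lock;
 *		atomic64_t pos;
 *		struct kasan_stack_ring_entry entries[KASAN_STACK_RING_SIZE];
 *	};
 *
 * save_stack_info() below claims a slot by advancing pos and marking the
 * slot busy through its ptr field; once full, the ring overwrites its
 * oldest entries.
 */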

/* kasan.stacktrace=off/on */
static int __init early_kasan_flag_stacktrace(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "off"))
		kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_OFF;
	else if (!strcmp(arg, "on"))
		kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_ON;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.stacktrace", early_kasan_flag_stacktrace);

void __init kasan_init_tags(void)
{
	switch (kasan_arg_stacktrace) {
	case KASAN_ARG_STACKTRACE_DEFAULT:
		/* Default is specified by kasan_flag_stacktrace definition. */
		break;
	case KASAN_ARG_STACKTRACE_OFF:
		static_branch_disable(&kasan_flag_stacktrace);
		break;
	case KASAN_ARG_STACKTRACE_ON:
		static_branch_enable(&kasan_flag_stacktrace);
		break;
	}
}
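
/*
 * The flag is consumed through a helper in mm/kasan/kasan.h, roughly:
 *
 *	static inline bool kasan_stack_collection_enabled(void)
 *	{
 *		return static_branch_unlikely(&kasan_flag_stacktrace);
 *	}
 *
 * so the check on the hot allocation and free paths compiles down to a
 * runtime-patched branch rather than a memory load.
 */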

static void save_stack_info(struct kmem_cache *cache, void *object,
			gfp_t gfp_flags, bool is_free)
{
	unsigned long flags;
	depot_stack_handle_t stack;
	u64 pos;
	struct kasan_stack_ring_entry *entry;
	void *old_ptr;

	stack = kasan_save_stack(gfp_flags, true);

	/*
	 * Prevent save_stack_info() from modifying stack ring
	 * when kasan_complete_mode_report_info() is walking it.
	 */
	read_lock_irqsave(&stack_ring.lock, flags);

next:
	pos = atomic64_fetch_add(1, &stack_ring.pos);
	entry = &stack_ring.entries[pos % KASAN_STACK_RING_SIZE];

	/* Detect stack ring entry slots that are being written to. */
	old_ptr = READ_ONCE(entry->ptr);
	if (old_ptr == STACK_RING_BUSY_PTR)
		goto next; /* Busy slot. */
	if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))
		goto next; /* Busy slot. */

	WRITE_ONCE(entry->size, cache->object_size);
	WRITE_ONCE(entry->pid, current->pid);
	WRITE_ONCE(entry->stack, stack);
	WRITE_ONCE(entry->is_free, is_free);

	/*
	 * Paired with smp_load_acquire() in kasan_complete_mode_report_info().
	 */
	smp_store_release(&entry->ptr, (s64)object);

	read_unlock_irqrestore(&stack_ring.lock, flags);
}
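
/*
 * The paired reader is kasan_complete_mode_report_info() in
 * mm/kasan/report_tags.c. A rough sketch of its side of the protocol: it
 * takes stack_ring.lock for writing, which stalls new writers, then walks
 * the ring backwards from the current position looking for entries whose
 * tagged pointer matches the buggy object:
 *
 *	write_lock_irqsave(&stack_ring.lock, flags);
 *	pos = atomic64_read(&stack_ring.pos);
 *	for (u64 i = pos - 1; i != pos - 1 - KASAN_STACK_RING_SIZE; i--) {
 *		entry = &stack_ring.entries[i % KASAN_STACK_RING_SIZE];
 *		ptr = (void *)smp_load_acquire(&entry->ptr);
 *		...
 *	}
 *
 * The acquire load pairs with the release store above: once ptr is
 * observed, the size/pid/stack/is_free values read afterwards are the
 * ones written before the pointer was published.
 */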

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	save_stack_info(cache, object, flags, false);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	save_stack_info(cache, object, GFP_NOWAIT, true);
}
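
/*
 * Both wrappers are called from KASAN's slab hooks in mm/kasan/common.c.
 * The allocation path passes the caller's gfp flags through, while the
 * free path has no gfp context of its own, hence the GFP_NOWAIT above,
 * which keeps the stack depot from sleeping or entering direct reclaim
 * while saving the free stack.
 */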