Diffstat (limited to 'mm/kasan')
-rw-r--r--  mm/kasan/common.c             |  2
-rw-r--r--  mm/kasan/kasan_test.c         | 82
-rw-r--r--  mm/kasan/kasan_test_module.c  |  4
-rw-r--r--  mm/kasan/report.c             |  2
-rw-r--r--  mm/kasan/shadow.c             | 11
5 files changed, 87 insertions(+), 14 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 6ca63e8dda74..e7c9a4dc89f8 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -55,7 +55,7 @@ void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack)
u64 ts_nsec = local_clock();
track->cpu = cpu;
- track->timestamp = ts_nsec >> 3;
+ track->timestamp = ts_nsec >> 9;
#endif /* CONFIG_KASAN_EXTRA_INFO */
track->pid = current->pid;
track->stack = stack;
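
The larger shift pairs with the matching <<= 9 in report.c further down: under CONFIG_KASAN_EXTRA_INFO the timestamp is kept in a reduced-width field (its exact width lives in kasan.h, outside this diff), so dropping the low 9 bits trades roughly 512 ns of precision for a longer representable interval. A minimal userspace sketch of the round-trip, assuming a hypothetical 34-bit field width rather than the real struct kasan_track layout:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-in for the narrow timestamp field in struct kasan_track;
 * the 34-bit width is an assumption made for this sketch, not the real layout.
 */
struct packed_track {
	uint64_t timestamp : 34;
};

int main(void)
{
	uint64_t ts_nsec = 123456789012ULL;	/* pretend local_clock() value */
	struct packed_track t = { .timestamp = ts_nsec >> 9 };

	/* print_track() reverses the shift; the low 9 bits (~512 ns) are lost. */
	uint64_t restored = (uint64_t)t.timestamp << 9;

	printf("stored %llu ns, restored %llu ns (granularity 512 ns)\n",
	       (unsigned long long)ts_nsec, (unsigned long long)restored);
	return 0;
}
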
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 971cfff4ca0b..7b32be2a3cf0 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -440,7 +440,8 @@ static void kmalloc_oob_16(struct kunit *test)
/* This test is specifically crafted for the generic mode. */
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
- ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
+ /* RELOC_HIDE to prevent gcc from warning about short alloc */
+ ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
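
RELOC_HIDE() launders the returned pointer through an empty asm statement, so GCC loses track of the allocation it came from and cannot warn that the request is shorter than *ptr1; the deliberate out-of-bounds access is then left for KASAN to catch at runtime. A rough userspace equivalent of the laundering trick, where HIDE() is a hypothetical stand-in written for this sketch and not a kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * GCC/Clang statement expression plus an empty asm: the asm makes the
 * pointer's origin opaque, so the compiler can no longer prove how large
 * the underlying allocation is.
 */
#define HIDE(ptr)				\
({						\
	__typeof__(ptr) __p = (ptr);		\
	__asm__ ("" : "+r"(__p));		\
	__p;					\
})

int main(void)
{
	/* Deliberately short allocation, as in kmalloc_oob_16(). */
	char *buf = HIDE(malloc(13));

	if (!buf)
		return 1;
	/*
	 * The KASAN test goes on to dereference past the allocation so that
	 * KASAN, not the compiler, reports the bug; this sketch only touches
	 * the valid bytes to stay well-defined.
	 */
	memset(buf, 0, 13);
	printf("allocation size is now opaque to the compiler\n");
	free(buf);
	return 0;
}
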
@@ -697,6 +698,84 @@ static void kmalloc_uaf3(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
}
+static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
+{
+ int *i_unsafe = unsafe;
+
+ KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));
+
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));
+
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
+ KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
+}
+
+static void kasan_atomics(struct kunit *test)
+{
+ void *a1, *a2;
+
+ /*
+ * Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
+ * that the following 16 bytes will make up the redzone.
+ */
+ a1 = kzalloc(48, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
+ a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);
+
+ /* Use atomics to access the redzone. */
+ kasan_atomics_helper(test, a1 + 48, a2);
+
+ kfree(a1);
+ kfree(a2);
+}
+
static void kmalloc_double_kzfree(struct kunit *test)
{
char *ptr;
@@ -1883,6 +1962,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kasan_strings),
KUNIT_CASE(kasan_bitops_generic),
KUNIT_CASE(kasan_bitops_tags),
+ KUNIT_CASE(kasan_atomics),
KUNIT_CASE(vmalloc_helpers_tags),
KUNIT_CASE(vmalloc_oob),
KUNIT_CASE(vmap_tags),
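
The 48-byte allocation in kasan_atomics() works because kmalloc rounds the request up to a larger size class, leaving the object's trailing bytes as KASAN redzone, so a1 + 48 points at the first poisoned byte while a2 is a fully valid object used as the "safe" operand. A small sketch of that arithmetic, where the 64-byte size class is an assumption about the slab configuration:

#include <stddef.h>
#include <stdio.h>

/* Assumed kmalloc size class serving a 48-byte request on common configs. */
#define ASSUMED_CACHE_SIZE	64
#define REQUESTED_SIZE		48

int main(void)
{
	size_t redzone_bytes = ASSUMED_CACHE_SIZE - REQUESTED_SIZE;

	/* Bytes [0, 48) are valid; [48, 64) are redzone, so base + 48 is poisoned. */
	printf("first poisoned offset: %d, redzone length: %zu\n",
	       REQUESTED_SIZE, redzone_bytes);
	return 0;
}
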
diff --git a/mm/kasan/kasan_test_module.c b/mm/kasan/kasan_test_module.c
index 8b7b3ea2c74e..27ec22767e42 100644
--- a/mm/kasan/kasan_test_module.c
+++ b/mm/kasan/kasan_test_module.c
@@ -62,7 +62,7 @@ static noinline void __init copy_user_test(void)
kfree(kmem);
}
-static int __init test_kasan_module_init(void)
+static int __init kasan_test_module_init(void)
{
/*
* Temporarily enable multi-shot mode. Otherwise, KASAN would only
@@ -77,5 +77,5 @@ static int __init test_kasan_module_init(void)
return -EAGAIN;
}
-module_init(test_kasan_module_init);
+module_init(kasan_test_module_init);
MODULE_LICENSE("GPL");
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 7afa4feb03e1..b48c768acc84 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -267,7 +267,7 @@ static void print_track(struct kasan_track *track, const char *prefix)
u64 ts_nsec = track->timestamp;
unsigned long rem_usec;
- ts_nsec <<= 3;
+ ts_nsec <<= 9;
rem_usec = do_div(ts_nsec, NSEC_PER_SEC) / 1000;
pr_err("%s by task %u on cpu %d at %lu.%06lus:\n",
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 9ef84f31833f..d6210ca48dda 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -199,19 +199,12 @@ static bool shadow_mapped(unsigned long addr)
pud = pud_offset(p4d, addr);
if (pud_none(*pud))
return false;
-
- /*
- * We can't use pud_large() or pud_huge(), the first one is
- * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
- * pud_bad(), if pud is bad then it's bad because it's huge.
- */
- if (pud_bad(*pud))
+ if (pud_leaf(*pud))
return true;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return false;
-
- if (pmd_bad(*pmd))
+ if (pmd_leaf(*pmd))
return true;
pte = pte_offset_kernel(pmd, addr);
return !pte_none(ptep_get(pte));
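
With this change shadow_mapped() stops at leaf entries using the generic pud_leaf()/pmd_leaf() helpers instead of repurposing pud_bad()/pmd_bad(). A sketch of the full walk pattern the function follows, including the pgd/p4d levels cut off by the hunk; this is illustrative only, not a drop-in replacement for the real function:

/*
 * Descend the kernel page tables and treat any leaf (huge) entry as
 * "mapped", since there is no lower-level table left to inspect.
 */
static bool walk_is_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;
	if (pud_leaf(*pud))		/* huge PUD mapping */
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	if (pmd_leaf(*pmd))		/* huge PMD mapping */
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(ptep_get(pte));
}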