path: root/mm/kasan/shadow.c
author	Andrey Konovalov <andreyknvl@google.com>	2021-02-26 04:19:59 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-02-26 20:41:02 +0300
commit	e2db1a9aa3814960a56583df39ea71e36d802278 (patch)
tree	b4918793cf7ad4f5ba30e83f6802861f6b367d11 /mm/kasan/shadow.c
parent	928501344fc645f80390afc12708c81b3595745d (diff)
download	linux-e2db1a9aa3814960a56583df39ea71e36d802278.tar.xz
kasan, mm: optimize kmalloc poisoning
For allocations from kmalloc caches, kasan_kmalloc() always follows kasan_slab_alloc(). Currently, both of them unpoison the whole object, which is unnecessary.

This patch provides separate implementations for both annotations: kasan_slab_alloc() unpoisons the whole object, and kasan_kmalloc() only poisons the redzone.

For generic KASAN, the redzone start might not be aligned to KASAN_GRANULE_SIZE. Therefore, the poisoning is split in two parts: kasan_poison_last_granule() poisons the unaligned part, and then kasan_poison() poisons the rest.

This patch also clarifies alignment guarantees of each of the poisoning functions and drops the unnecessary round_up() call for redzone_end.

With this change, the early SLUB cache annotation needs to be changed to kasan_slab_alloc(), as kasan_kmalloc() doesn't unpoison objects now. The number of poisoned bytes for objects in this cache stays the same, as kmem_cache_node->object_size is equal to sizeof(struct kmem_cache_node).

Link: https://lkml.kernel.org/r/7e3961cb52be380bc412860332063f5f7ce10d13.1612546384.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
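To make the shadow bookkeeping concrete, here is a minimal userspace sketch of the split the message describes, not the kernel implementation: whole granules are marked by one shadow byte each, and a partially used last granule stores the number of its accessible bytes. GRANULE, POISON, OBJ_BYTES, the shadow[] array, and the helpers poison_aligned() and poison_last_granule() are illustrative stand-ins (assumptions) for KASAN_GRANULE_SIZE, the redzone value, the per-object shadow, kasan_poison(), and kasan_poison_last_granule().

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define GRANULE   8	/* stand-in for KASAN_GRANULE_SIZE */
#define POISON    0xFC	/* stand-in for a redzone poison value */
#define OBJ_BYTES 256	/* illustrative slab object size */

/* One shadow byte tracks one GRANULE-sized chunk of the object. */
static uint8_t shadow[OBJ_BYTES / GRANULE];

/* Rough model of kasan_poison(): start and end are granule-aligned,
 * so each affected granule gets exactly one shadow byte written. */
static void poison_aligned(size_t start, size_t end, uint8_t value)
{
	memset(&shadow[start / GRANULE], value, (end - start) / GRANULE);
}

/* Rough model of kasan_poison_last_granule(): if size is not
 * granule-aligned, the last granule's shadow byte stores how many
 * of its bytes are accessible. */
static void poison_last_granule(size_t size)
{
	if (size % GRANULE)
		shadow[size / GRANULE] = (uint8_t)(size % GRANULE);
}

int main(void)
{
	size_t req = 100;	/* bytes the caller asked kmalloc() for */
	size_t redzone_start = (req + GRANULE - 1) / GRANULE * GRANULE;

	/* kasan_slab_alloc()-style step: whole object becomes accessible. */
	poison_aligned(0, OBJ_BYTES, 0x00);

	/* kasan_kmalloc()-style step: poison only the redzone past the
	 * requested size, then encode the partially used last granule. */
	poison_aligned(redzone_start, OBJ_BYTES, POISON);
	poison_last_granule(req);

	for (size_t i = 0; i < sizeof(shadow); i++)
		printf("granule %2zu: shadow = 0x%02x\n", i, shadow[i]);
	return 0;
}

With req = 100 and GRANULE = 8, granules 0..11 read back as 0x00 (fully accessible), granule 12 as 0x04 (only 4 bytes valid), and granules 13..31 as 0xfc: only the redzone gets poisoned, which is the behaviour the patch moves kasan_kmalloc() to.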
Diffstat (limited to 'mm/kasan/shadow.c')
-rw-r--r--	mm/kasan/shadow.c	28
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 1372a2fc0ca9..1ed7817e4ee6 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -69,10 +69,6 @@ void *memcpy(void *dest, const void *src, size_t len)
 	return __memcpy(dest, src, len);
 }
 
-/*
- * Poisons the shadow memory for 'size' bytes starting from 'addr'.
- * Memory addresses should be aligned to KASAN_GRANULE_SIZE.
- */
 void kasan_poison(const void *address, size_t size, u8 value)
 {
 	void *shadow_start, *shadow_end;
@@ -83,12 +79,12 @@ void kasan_poison(const void *address, size_t size, u8 value)
 	 * addresses to this function.
 	 */
 	address = kasan_reset_tag(address);
-	size = round_up(size, KASAN_GRANULE_SIZE);
 
 	/* Skip KFENCE memory if called explicitly outside of sl*b. */
 	if (is_kfence_address(address))
 		return;
 
+	size = round_up(size, KASAN_GRANULE_SIZE);
 	shadow_start = kasan_mem_to_shadow(address);
 	shadow_end = kasan_mem_to_shadow(address + size);
 
@@ -96,6 +92,16 @@ void kasan_poison(const void *address, size_t size, u8 value)
 }
 EXPORT_SYMBOL(kasan_poison);
 
+#ifdef CONFIG_KASAN_GENERIC
+void kasan_poison_last_granule(const void *address, size_t size)
+{
+	if (size & KASAN_GRANULE_MASK) {
+		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
+		*shadow = size & KASAN_GRANULE_MASK;
+	}
+}
+#endif
+
 void kasan_unpoison(const void *address, size_t size)
 {
 	u8 tag = get_tag(address);
@@ -115,16 +121,12 @@ void kasan_unpoison(const void *address, size_t size)
 	if (is_kfence_address(address))
 		return;
 
+	/* Unpoison round_up(size, KASAN_GRANULE_SIZE) bytes. */
 	kasan_poison(address, size, tag);
 
-	if (size & KASAN_GRANULE_MASK) {
-		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
-
-		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-			*shadow = tag;
-		else /* CONFIG_KASAN_GENERIC */
-			*shadow = size & KASAN_GRANULE_MASK;
-	}
+	/* Partially poison the last granule for the generic mode. */
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+		kasan_poison_last_granule(address, size);
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG