Diffstat (limited to 'mm/kasan/shadow.c')
-rw-r--r--	mm/kasan/shadow.c	16
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 5faba876456f..ba84e5106585 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -161,7 +161,7 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
 	shadow_end = shadow_start + shadow_size;
 
 	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
-		WARN_ON(start_kaddr % (KASAN_GRANULE_SIZE << PAGE_SHIFT)))
+		WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
 		return NOTIFY_BAD;
 
 	switch (action) {
@@ -432,22 +432,20 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 	unsigned long region_start, region_end;
 	unsigned long size;
 
-	region_start = ALIGN(start, PAGE_SIZE * KASAN_GRANULE_SIZE);
-	region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_GRANULE_SIZE);
+	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
+	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
 
-	free_region_start = ALIGN(free_region_start,
-				  PAGE_SIZE * KASAN_GRANULE_SIZE);
+	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);
 
 	if (start != region_start &&
 	    free_region_start < region_start)
-		region_start -= PAGE_SIZE * KASAN_GRANULE_SIZE;
+		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;
 
-	free_region_end = ALIGN_DOWN(free_region_end,
-				     PAGE_SIZE * KASAN_GRANULE_SIZE);
+	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);
 
 	if (end != region_end &&
 	    free_region_end > region_end)
-		region_end += PAGE_SIZE * KASAN_GRANULE_SIZE;
+		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;
 
 	shadow_start = kasan_mem_to_shadow((void *)region_start);
 	shadow_end = kasan_mem_to_shadow((void *)region_end);
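
For reference, a minimal standalone sketch of what KASAN_MEMORY_PER_SHADOW_PAGE presumably expands to, inferred from the two expressions it replaces in the hunks above (KASAN_GRANULE_SIZE << PAGE_SHIFT, i.e. PAGE_SIZE * KASAN_GRANULE_SIZE). The PAGE_SHIFT and KASAN_GRANULE_SIZE values below are illustrative assumptions (4 KiB pages, 8-byte granules), not taken from this patch:

/* Standalone illustration; values are assumptions, not part of this diff. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT		12			/* assumed: 4 KiB pages */
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define KASAN_GRANULE_SIZE	8UL			/* assumed: 8-byte KASAN granules */

/* Presumed definition, matching the expressions this patch replaces. */
#define KASAN_MEMORY_PER_SHADOW_PAGE	(KASAN_GRANULE_SIZE << PAGE_SHIFT)

int main(void)
{
	/* One page of shadow memory covers PAGE_SIZE * KASAN_GRANULE_SIZE bytes. */
	assert(KASAN_MEMORY_PER_SHADOW_PAGE == PAGE_SIZE * KASAN_GRANULE_SIZE);
	printf("memory covered per shadow page: %lu bytes\n",
	       KASAN_MEMORY_PER_SHADOW_PAGE);
	return 0;
}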