author     Liam R. Howlett <Liam.Howlett@Oracle.com>    2022-09-06 22:48:51 +0300
committer  Andrew Morton <akpm@linux-foundation.org>    2022-09-27 05:46:18 +0300
commit     7964cf8caa4dfa42c4149f3833d3878713cda3dc (patch)
tree       4d8796756afa9f0b66a1daccea33dc935db687a2 /mm/mmap.c
parent     4dd1b84140c1b87a89d69a683bebbbdaeb620e39 (diff)
download   linux-7964cf8caa4dfa42c4149f3833d3878713cda3dc.tar.xz
mm: remove vmacache
By using the maple tree and the maple tree state, the vmacache is no longer
beneficial and is complicating the VMA code.  Remove the vmacache to reduce
the work in keeping it up to date and code complexity.

Link: https://lkml.kernel.org/r/20220906194824.2110408-26-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
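[Editor's note] For readers skimming the diff: after this patch, a VMA lookup is a single maple-tree query with no per-thread cache probe or invalidation on the side. A minimal sketch of the resulting lookup path, modelled on the post-patch find_vma() hunk below (the helper name find_vma_sketch is illustrative and not part of the patch):

#include <linux/mm.h>         /* struct mm_struct, struct vm_area_struct, mmap_assert_locked() */
#include <linux/maple_tree.h> /* mt_find() */

/* One walk of mm->mm_mt replaces the old vmacache_find()/vmacache_update() pair. */
static struct vm_area_struct *find_vma_sketch(struct mm_struct *mm,
					      unsigned long addr)
{
	unsigned long index = addr;

	mmap_assert_locked(mm);	/* caller must hold the mmap_lock */
	return mt_find(&mm->mm_mt, &index, ULONG_MAX);
}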
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--   mm/mmap.c   31
1 file changed, 2 insertions(+), 29 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 7a1adc916957..7872642e8993 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -14,7 +14,6 @@
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
-#include <linux/vmacache.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
@@ -680,9 +679,6 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
/* Remove from mm linked list - also updates highest_vm_end */
__vma_unlink_list(mm, next);
- /* Kill the cache */
- vmacache_invalidate(mm);
-
if (file)
__remove_shared_vm_struct(next, file, mapping);
@@ -923,8 +919,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
__vma_unlink_list(mm, next);
if (remove_next == 2)
__vma_unlink_list(mm, next_next);
- /* Kill the cache */
- vmacache_invalidate(mm);
if (file) {
__remove_shared_vm_struct(next, file, mapping);
@@ -2233,19 +2227,10 @@ struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
unsigned long start_addr,
unsigned long end_addr)
{
- struct vm_area_struct *vma;
unsigned long index = start_addr;
mmap_assert_locked(mm);
- /* Check the cache first. */
- vma = vmacache_find(mm, start_addr);
- if (likely(vma))
- return vma;
-
- vma = mt_find(&mm->mm_mt, &index, end_addr - 1);
- if (vma)
- vmacache_update(start_addr, vma);
- return vma;
+ return mt_find(&mm->mm_mt, &index, end_addr - 1);
}
EXPORT_SYMBOL(find_vma_intersection);
@@ -2259,19 +2244,10 @@ EXPORT_SYMBOL(find_vma_intersection);
*/
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
- struct vm_area_struct *vma;
unsigned long index = addr;
mmap_assert_locked(mm);
- /* Check the cache first. */
- vma = vmacache_find(mm, addr);
- if (likely(vma))
- return vma;
-
- vma = mt_find(&mm->mm_mt, &index, ULONG_MAX);
- if (vma)
- vmacache_update(addr, vma);
- return vma;
+ return mt_find(&mm->mm_mt, &index, ULONG_MAX);
}
EXPORT_SYMBOL(find_vma);
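[Editor's note, not part of the diff] Callers of find_vma() are unaffected by this change: the locking rule and the return convention stay the same. A hedged usage sketch, with a hypothetical helper name addr_is_mapped():

#include <linux/mm.h>	/* find_vma(), mmap_read_lock()/mmap_read_unlock() */

/*
 * find_vma() still requires the mmap_lock and still returns the first
 * VMA with vm_end > addr, which may start above addr.
 */
static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	mapped = vma && vma->vm_start <= addr;
	mmap_read_unlock(mm);
	return mapped;
}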
@@ -2660,9 +2636,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct ma_state *mas,
mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
tail_vma->vm_next = NULL;
- /* Kill the cache */
- vmacache_invalidate(mm);
-
/*
* Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
* VM_GROWSUP VMA. Such VMAs can change their size under