author		Rik van Riel <riel@redhat.com>			2010-03-06 00:42:10 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-06 22:26:26 +0300
commit		fc148a5f7e0532750c312385c7ee9fa3e9311f34 (patch)
tree		dfd132ed225a113f73c61f5e2018e5644bb3f677 /mm
parent		c44b674323f4a2480dbeb65d4b487fa5f06f49e0 (diff)
download	linux-fc148a5f7e0532750c312385c7ee9fa3e9311f34.tar.xz
mm: remove VM_LOCK_RMAP code
When a VMA is in an inconsistent state during setup or teardown, the
worst that can happen is that the rmap code will not be able to find
the page.  The mapping is in the process of being torn down (PTEs just
got invalidated by munmap), or set up (no PTEs have been instantiated
yet).

It is also impossible for the rmap code to follow a pointer to an
already freed VMA, because the rmap code holds the anon_vma->lock,
which the VMA teardown code needs to take before the VMA is removed
from the anon_vma chain.

Hence, we should not need the VM_LOCK_RMAP locking at all.

Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
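The changelog's argument is essentially one about lock ordering. The sketch below is a hypothetical userspace model (pthreads, not kernel code; every name here, including toy_vma, rmap_walk and vma_teardown, is made up for illustration) of that ordering: the reverse-map walker holds the chain lock while it follows VMA pointers, and teardown takes the same lock before unlinking and freeing, so the walker may miss a VMA that is mid-setup or mid-teardown, but it can never dereference a freed one.

/*
 * Hypothetical model of the ordering the changelog relies on: the
 * walker only touches chain entries under anon_vma_lock, and teardown
 * unlinks under the same lock before freeing.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_vma {			/* stand-in for vm_area_struct */
	struct toy_vma *next;
	unsigned long vm_start;
};

static pthread_mutex_t anon_vma_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_vma *chain;		/* stand-in for the anon_vma chain */

/* "rmap" side: walk the chain under the lock; an unlinked or not yet
 * linked VMA is simply not found, which is the harmless worst case. */
static void rmap_walk(unsigned long addr)
{
	struct toy_vma *v;

	pthread_mutex_lock(&anon_vma_lock);
	for (v = chain; v; v = v->next)
		if (v->vm_start == addr)
			printf("found vma starting at %#lx\n", v->vm_start);
	pthread_mutex_unlock(&anon_vma_lock);
}

/* "munmap" side: unlink under the same lock, free only afterwards, so
 * no walker can still hold a pointer to the freed structure. */
static void vma_teardown(struct toy_vma *victim)
{
	struct toy_vma **p;

	pthread_mutex_lock(&anon_vma_lock);
	for (p = &chain; *p; p = &(*p)->next)
		if (*p == victim) {
			*p = victim->next;
			break;
		}
	pthread_mutex_unlock(&anon_vma_lock);
	free(victim);
}

int main(void)
{
	struct toy_vma *vma = calloc(1, sizeof(*vma));

	if (!vma)
		return 1;
	vma->vm_start = 0x400000UL;

	rmap_walk(0x400000UL);		/* not linked yet: silently missed */

	pthread_mutex_lock(&anon_vma_lock);
	vma->next = chain;		/* "setup": link under the lock */
	chain = vma;
	pthread_mutex_unlock(&anon_vma_lock);

	rmap_walk(0x400000UL);		/* found */
	vma_teardown(vma);
	rmap_walk(0x400000UL);		/* unlinked again: silently missed */
	return 0;
}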
Diffstat (limited to 'mm')
-rw-r--r--	mm/mmap.c	15
-rw-r--r--	mm/rmap.c	12
2 files changed, 0 insertions, 27 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index 6a0c15db7f60..f1b4448626bf 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -554,9 +554,7 @@ again: remove_next = 1 + (end > next->vm_end);
 		 */
 		if (importer && !importer->anon_vma) {
 			/* Block reverse map lookups until things are set up. */
-			importer->vm_flags |= VM_LOCK_RMAP;
 			if (anon_vma_clone(importer, vma)) {
-				importer->vm_flags &= ~VM_LOCK_RMAP;
 				return -ENOMEM;
 			}
 			importer->anon_vma = anon_vma;
@@ -618,11 +616,6 @@ again: remove_next = 1 + (end > next->vm_end);
 		__vma_unlink(mm, next, vma);
 		if (file)
 			__remove_shared_vm_struct(next, file, mapping);
-		/*
-		 * This VMA is now dead, no need for rmap to follow it.
-		 * Call anon_vma_merge below, outside of i_mmap_lock.
-		 */
-		next->vm_flags |= VM_LOCK_RMAP;
 	} else if (insert) {
 		/*
 		 * split_vma has split insert from vma, and needs
@@ -635,20 +628,12 @@ again: remove_next = 1 + (end > next->vm_end);
 	if (mapping)
 		spin_unlock(&mapping->i_mmap_lock);
 
-	/*
-	 * The current VMA has been set up. It is now safe for the
-	 * rmap code to get from the pages to the ptes.
-	 */
-	if (anon_vma && importer)
-		importer->vm_flags &= ~VM_LOCK_RMAP;
-
 	if (remove_next) {
 		if (file) {
 			fput(file);
 			if (next->vm_flags & VM_EXECUTABLE)
 				removed_exe_file_vma(mm);
 		}
-		/* Protected by mmap_sem and VM_LOCK_RMAP. */
 		if (next->anon_vma)
 			anon_vma_merge(vma, next);
 		mm->map_count--;
diff --git a/mm/rmap.c b/mm/rmap.c
index 28bcdc433d88..4d2fb93851ca 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -329,18 +329,6 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 		/* page should be within @vma mapping range */
 		return -EFAULT;
 	}
-	if (unlikely(vma->vm_flags & VM_LOCK_RMAP)) {
-		/*
-		 * This VMA is being unlinked or is not yet linked into the
-		 * VMA tree. Do not try to follow this rmap. This race
-		 * condition can result in page_referenced() ignoring a
-		 * reference or in try_to_unmap() failing to unmap a page.
-		 * The VMA cannot be freed under us because we hold the
-		 * anon_vma->lock, which the munmap code takes while
-		 * unlinking the anon_vmas from the VMA.
-		 */
-		return -EFAULT;
-	}
 	return address;
 }
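For context on the rmap.c hunk: vma_address() maps a page back to its linear address inside a VMA and returns -EFAULT when the page is not covered by that VMA; with the VM_LOCK_RMAP check gone, the range check shown above is the only remaining failure path. Below is a hypothetical, self-contained toy model of the underlying arithmetic (the toy_ names and simplified constants are illustrative, not taken from the kernel).

/* Toy model (not kernel code) of a vma_address()-style calculation:
 * turn a page's offset in the mapped object into a linear address and
 * reject pages that fall outside [vm_start, vm_end). */
#include <stdio.h>

#define PAGE_SHIFT 12UL			/* assume 4 KiB pages */

struct toy_vma {
	unsigned long vm_start;		/* first address of the mapping */
	unsigned long vm_end;		/* one past the last mapped address */
	unsigned long vm_pgoff;		/* page offset of vm_start in the object */
};

static long toy_vma_address(unsigned long page_index, const struct toy_vma *vma)
{
	unsigned long address;

	address = vma->vm_start + ((page_index - vma->vm_pgoff) << PAGE_SHIFT);
	if (address < vma->vm_start || address >= vma->vm_end)
		return -1;		/* this VMA does not map the page */
	return (long)address;
}

int main(void)
{
	struct toy_vma vma = { 0x400000UL, 0x404000UL, 0x10UL };

	/* page index 0x12 sits two pages into the mapping: 0x402000 */
	printf("%#lx\n", (unsigned long)toy_vma_address(0x12, &vma));
	/* page index 0x20 lands past vm_end, so the lookup fails */
	printf("%ld\n", toy_vma_address(0x20, &vma));
	return 0;
}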