author		Matthew Wilcox (Oracle) <willy@infradead.org>	2024-03-29 01:58:27 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2024-04-26 06:56:31 +0300
commit		7e8347413e5bc4d54712942dad43bfcf2501ab3b (patch)
tree		019894cbfec801994672d9c1ed00df78cc036fd0 /mm/page_vma_mapped.c
parent		835c3a25aa373d486514e4e0f5a7450ea82ae489 (diff)
download	linux-7e8347413e5bc4d54712942dad43bfcf2501ab3b.tar.xz
mm: correct page_mapped_in_vma() for large folios
Patch series "Unify vma_address and vma_pgoff_address". The current vma_address() pretends that the ambiguity between head & tail page is an advantage. If you pass a head page to vma_address(), it will operate on all pages in the folio, while if you pass a tail page, it will operate on a single page. That's not what any of the callers actually want, so first convert all callers to use vma_pgoff_address() and then rename vma_pgoff_address() to vma_address(). This patch (of 3): If 'page' is the first page of a large folio then vma_address() will scan for any page in the entire folio. This can lead to page_mapped_in_vma() returning true if some of the tail pages are mapped and the head page is not. This could lead to memory failure choosing to kill a task unnecessarily. Link: https://lkml.kernel.org/r/20240328225831.1765286-1-willy@infradead.org Link: https://lkml.kernel.org/r/20240328225831.1765286-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: David Hildenbrand <david@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/page_vma_mapped.c')
-rw-r--r--	mm/page_vma_mapped.c	4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 74d2de15fb5e..ac48d6284bad 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -325,6 +325,8 @@ next_pte:
  */
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 {
+	struct folio *folio = page_folio(page);
+	pgoff_t pgoff = folio->index + folio_page_idx(folio, page);
 	struct page_vma_mapped_walk pvmw = {
 		.pfn = page_to_pfn(page),
 		.nr_pages = 1,
@@ -332,7 +334,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 		.flags = PVMW_SYNC,
 	};
 
-	pvmw.address = vma_address(page, vma);
+	pvmw.address = vma_pgoff_address(pgoff, 1, vma);
 	if (pvmw.address == -EFAULT)
 		return 0;
 	if (!page_vma_mapped_walk(&pvmw))
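[Editor's note] After this change, pvmw.address is the address of the specific
page passed in (folio->index plus that page's index within the folio), rather
than an address derived from scanning for any page of the folio.  Combined
with .pfn = page_to_pfn(page) and .nr_pages = 1, the walk answers the question
the memory-failure caller actually asks: is this particular page mapped in
this VMA?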