From 37bc2ff506b184411e4cc80f111c638b2b4c83d4 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 12 Apr 2024 20:35:00 +0100
Subject: mm: return the address from page_mapped_in_vma()

The only user of this function calls page_address_in_vma() immediately
after page_mapped_in_vma() calculates it and uses it to return true/false.
Return the address instead, allowing memory-failure to skip the call to
page_address_in_vma().

Link: https://lkml.kernel.org/r/20240412193510.2356957-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Acked-by: Miaohe Lin
Reviewed-by: Jane Chu
Cc: Dan Williams
Cc: Oscar Salvador
Signed-off-by: Andrew Morton
---
 include/linux/rmap.h |  2 +-
 mm/memory-failure.c  | 22 +++++++++++++---------
 mm/page_vma_mapped.c | 16 +++++++++-------
 3 files changed, 23 insertions(+), 17 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 0f906dc6d280..7229b9baf20d 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -730,7 +730,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
 
 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
 
-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
 /*
  * rmap_walk_control: To control rmap traversing for specific needs
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9e1a7d8ca745..12e5d2844cb1 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -473,10 +473,11 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
 }
 
 static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
-				  struct vm_area_struct *vma,
-				  struct list_head *to_kill)
+		struct vm_area_struct *vma, struct list_head *to_kill,
+		unsigned long addr)
 {
-	unsigned long addr = page_address_in_vma(p, vma);
+	if (addr == -EFAULT)
+		return;
 	__add_to_kill(tsk, p, vma, to_kill, addr);
 }
 
@@ -601,7 +602,6 @@ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
 static void collect_procs_anon(struct folio *folio, struct page *page,
 		struct list_head *to_kill, int force_early)
 {
-	struct vm_area_struct *vma;
 	struct task_struct *tsk;
 	struct anon_vma *av;
 	pgoff_t pgoff;
@@ -613,8 +613,10 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
 	pgoff = page_to_pgoff(page);
 	rcu_read_lock();
 	for_each_process(tsk) {
+		struct vm_area_struct *vma;
 		struct anon_vma_chain *vmac;
 		struct task_struct *t = task_early_kill(tsk, force_early);
+		unsigned long addr;
 
 		if (!t)
 			continue;
@@ -623,9 +625,8 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
 			vma = vmac->vma;
 			if (vma->vm_mm != t->mm)
 				continue;
-			if (!page_mapped_in_vma(page, vma))
-				continue;
-			add_to_kill_anon_file(t, page, vma, to_kill);
+			addr = page_mapped_in_vma(page, vma);
+			add_to_kill_anon_file(t, page, vma, to_kill, addr);
 		}
 	}
 	rcu_read_unlock();
@@ -648,6 +649,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
 	pgoff = page_to_pgoff(page);
 	for_each_process(tsk) {
 		struct task_struct *t = task_early_kill(tsk, force_early);
+		unsigned long addr;
 
 		if (!t)
 			continue;
@@ -660,8 +662,10 @@ static void collect_procs_file(struct folio *folio, struct page *page,
 			 * Assume applications who requested early kill want
 			 * to be informed of all such data corruptions.
 			 */
-			if (vma->vm_mm == t->mm)
-				add_to_kill_anon_file(t, page, vma, to_kill);
+			if (vma->vm_mm != t->mm)
+				continue;
+			addr = page_address_in_vma(page, vma);
+			add_to_kill_anon_file(t, page, vma, to_kill, addr);
 		}
 	}
 	rcu_read_unlock();
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 53b8868ede61..c202eab84936 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -319,11 +319,12 @@ next_pte:
  * @page: the page to test
  * @vma: the VMA to test
  *
- * Returns 1 if the page is mapped into the page tables of the VMA, 0
- * if the page is not mapped into the page tables of this VMA.  Only
- * valid for normal file or anonymous VMAs.
+ * Return: The address the page is mapped at if the page is in the range
+ * covered by the VMA and present in the page table.  If the page is
+ * outside the VMA or not present, returns -EFAULT.
+ * Only valid for normal file or anonymous VMAs.
  */
-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 {
 	struct folio *folio = page_folio(page);
 	pgoff_t pgoff = folio->index + folio_page_idx(folio, page);
@@ -336,9 +337,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 
 	pvmw.address = vma_address(vma, pgoff, 1);
 	if (pvmw.address == -EFAULT)
-		return 0;
+		goto out;
 	if (!page_vma_mapped_walk(&pvmw))
-		return 0;
+		return -EFAULT;
 	page_vma_mapped_walk_done(&pvmw);
-	return 1;
+out:
+	return pvmw.address;
 }
-- 
cgit v1.2.3
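
The patch changes page_mapped_in_vma() from returning a 0/1 flag to returning either the mapped user address or -EFAULT stored in an unsigned long, and callers such as add_to_kill_anon_file() now reject -EFAULT instead of testing a boolean. The following standalone sketch illustrates only that return convention; lookup_addr() and report() are hypothetical stand-ins, not kernel functions, and the address is made up.

```c
#include <errno.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for page_mapped_in_vma(): return the mapped
 * address on success, or -EFAULT encoded in an unsigned long when the
 * page is not mapped (the convention the patch introduces).
 */
static unsigned long lookup_addr(int present)
{
	if (!present)
		return -EFAULT;		/* wraps to a large unsigned value */
	return 0x7f0000001000UL;	/* made-up user address */
}

/*
 * Hypothetical stand-in for add_to_kill_anon_file(): the callee, not the
 * caller, filters out the -EFAULT case, mirroring the patched code.
 */
static void report(unsigned long addr)
{
	if (addr == (unsigned long)-EFAULT) {
		printf("not mapped, skipping\n");
		return;
	}
	printf("mapped at %#lx\n", addr);
}

int main(void)
{
	report(lookup_addr(1));	/* prints: mapped at 0x7f0000001000 */
	report(lookup_addr(0));	/* prints: not mapped, skipping */
	return 0;
}
```

Folding the -EFAULT check into the callee is what lets collect_procs_anon() drop its separate "is it mapped?" test and pass the address straight through.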