author     Matthew Wilcox (Oracle) <willy@infradead.org>  2022-02-02 07:33:08 +0300
committer  Matthew Wilcox (Oracle) <willy@infradead.org>  2022-03-21 20:01:35 +0300
commit     9595d76942b8714627d670a7e7ae543812c731ae (patch)
tree       febd0a334505f4e11fcd427b96e740ab0548a3cc /mm/memory-failure.c
parent     c8423186078312d344474bcb9e2b1ce0a78dbde4 (diff)
mm/rmap: Turn page_lock_anon_vma_read() into folio_lock_anon_vma_read()
Add back page_lock_anon_vma_read() as a wrapper. This saves a few calls
to compound_head(). If any callers were passing a tail page before,
this would have failed to lock the anon VMA as page->mapping is not
valid for tail pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
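The wrapper itself is added in mm/rmap.c and therefore does not appear in the
diffstat below, which is limited to mm/memory-failure.c. A minimal sketch of
the shape such a compatibility wrapper takes, assuming the one-argument
folio_lock_anon_vma_read() signature used by the caller below (not copied from
the patch):

	/*
	 * Sketch of the compatibility wrapper described above; the exact
	 * hunk lives in mm/rmap.c and is not part of this diffstat. It
	 * resolves the folio once and defers to the folio-based primitive.
	 */
	struct anon_vma *page_lock_anon_vma_read(struct page *page)
	{
		return folio_lock_anon_vma_read(page_folio(page));
	}

Callers that already hold a folio, like the one converted below, can call the
folio variant directly and skip the page_folio()/compound_head() lookup.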
Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r--  mm/memory-failure.c | 3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 258913d5e036..aa8236848949 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -487,12 +487,13 @@ static struct task_struct *task_early_kill(struct task_struct *tsk,
 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 				int force_early)
 {
+	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
 	struct anon_vma *av;
 	pgoff_t pgoff;
 
-	av = page_lock_anon_vma_read(page);
+	av = folio_lock_anon_vma_read(folio);
 	if (av == NULL)	/* Not actually mapped anymore */
 		return;
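For context on the tail-page caveat in the commit message: for anonymous
memory the anon_vma pointer is stashed in the head page's ->mapping field with
a flag bit set, while tail pages reuse ->mapping for other state, so the old
page-based lookup could not work if handed a tail page. A rough, illustrative
sketch of that lookup (hypothetical helper name, not code from this patch),
assuming the usual PAGE_MAPPING_ANON encoding:

	/*
	 * Illustrative only, not from this patch: how an anon_vma is
	 * typically recovered from a folio. The pointer is stored in
	 * folio->mapping (the head page's ->mapping) with PAGE_MAPPING_ANON
	 * set in the low bits; a tail page's ->mapping holds unrelated
	 * state, which is why the caller above resolves the folio first.
	 */
	static struct anon_vma *sketch_folio_anon_vma(struct folio *folio)
	{
		unsigned long mapping = (unsigned long)folio->mapping;

		if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
			return NULL;	/* file-backed, KSM or movable */
		return (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
	}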