author    Matthew Wilcox (Oracle) <willy@infradead.org>  2022-01-28 22:29:43 +0300
committer Matthew Wilcox (Oracle) <willy@infradead.org>  2022-03-21 20:01:32 +0300
commit    4b8554c527f3cfa183f6c06d231a9387873205a0 (patch)
tree      cba1023980f8eaca5ae0f9c917056179113a1516  /mm/huge_memory.c
parent    869f7ee6f6477341f859c8b0949ae81caf9ca7f3 (diff)
download  linux-4b8554c527f3cfa183f6c06d231a9387873205a0.tar.xz
mm/rmap: Convert try_to_migrate() to folios
Convert the callers to pass a folio and the try_to_migrate_one() worker
to use a folio throughout.  Fixes an assumption that a folio must be
<= PMD size.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  4
1 file changed, 2 insertions, 2 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index de684427f79c..7df1934d6528 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2262,8 +2262,8 @@ static void unmap_page(struct page *page)
* pages can simply be left unmapped, then faulted back on demand.
* If that is ever changed (perhaps for mlock), update remap_page().
*/
- if (PageAnon(page))
- try_to_migrate(page, ttu_flags);
+ if (folio_test_anon(folio))
+ try_to_migrate(folio, ttu_flags);
else
try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
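
For context, a rough sketch of the call-site shape this conversion produces.
example_migrate_or_unmap() below is a hypothetical wrapper written only for
illustration (it is not part of the patch); page_folio(), folio_test_anon(),
try_to_migrate() and try_to_unmap() are the kernel helpers the change relies
on, where the latter two take a struct folio after this series. A caller that
still holds only a struct page looks up the owning folio first and then passes
the folio to the rmap walkers:

	/* Illustrative only: the post-conversion call-site pattern, not the patch itself. */
	#include <linux/page-flags.h>	/* page_folio(), folio_test_anon() */
	#include <linux/rmap.h>		/* try_to_migrate(), try_to_unmap(), TTU_* flags */

	/* Hypothetical helper showing how a page-based caller adapts to the folio API. */
	static void example_migrate_or_unmap(struct page *page, enum ttu_flags ttu_flags)
	{
		struct folio *folio = page_folio(page);	/* find the folio that owns this page */

		if (folio_test_anon(folio))
			try_to_migrate(folio, ttu_flags);	/* anon: replace PTEs with migration entries */
		else
			try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);	/* file-backed: simply unmap */
	}

Because the folio is looked up once and passed through, the worker no longer
needs to assume the mapping it walks is at most PMD-sized, which is the
assumption the commit message calls out.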