Diffstat (limited to 'mm/migrate_device.c')
-rw-r--r--  mm/migrate_device.c | 41
1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 66206734b1b9..aecc71972a87 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -71,7 +71,7 @@ again:
return migrate_vma_collect_hole(start, end, -1, walk);
if (pmd_trans_huge(*pmdp)) {
- struct page *page;
+ struct folio *folio;
ptl = pmd_lock(mm, pmdp);
if (unlikely(!pmd_trans_huge(*pmdp))) {
@@ -79,21 +79,21 @@ again:
goto again;
}
- page = pmd_page(*pmdp);
- if (is_huge_zero_page(page)) {
+ folio = pmd_folio(*pmdp);
+ if (is_huge_zero_folio(folio)) {
spin_unlock(ptl);
split_huge_pmd(vma, pmdp, addr);
} else {
int ret;
- get_page(page);
+ folio_get(folio);
spin_unlock(ptl);
- if (unlikely(!trylock_page(page)))
+ if (unlikely(!folio_trylock(folio)))
return migrate_vma_collect_skip(start, end,
walk);
- ret = split_huge_page(page);
- unlock_page(page);
- put_page(page);
+ ret = split_folio(folio);
+ folio_unlock(folio);
+ folio_put(folio);
if (ret)
return migrate_vma_collect_skip(start, end,
walk);
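
[Not part of the patch: a condensed sketch of the pin/try-lock/split pattern the hunk above converts to folio APIs. The helper name try_split_for_migration is hypothetical; the folio calls are the ones the hunk introduces.]

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Hypothetical helper mirroring the hunk above: the caller holds the
 * PMD lock 'ptl'; we pin the folio, drop the lock, then try-lock and
 * split. Returns 0 when the folio was split into base pages, -EBUSY
 * if the folio lock could not be taken, or the error from
 * split_folio().
 */
static int try_split_for_migration(struct folio *folio, spinlock_t *ptl)
{
	int ret;

	folio_get(folio);		/* keep the folio alive past unlock */
	spin_unlock(ptl);

	if (unlikely(!folio_trylock(folio))) {
		folio_put(folio);	/* unlike the hunk above, drop our pin here */
		return -EBUSY;
	}
	ret = split_folio(folio);	/* 0 on success */
	folio_unlock(folio);
	folio_put(folio);
	return ret;
}
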
@@ -324,6 +324,8 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
*/
static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
{
+ struct folio *folio = page_folio(page);
+
/*
* One extra ref because caller holds an extra reference, either from
* isolate_lru_page() for a regular page, or migrate_vma_collect() for
@@ -336,18 +338,18 @@ static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
* check them than regular pages, because they can be mapped with a pmd
* or with a pte (split pte mapping).
*/
- if (PageCompound(page))
+ if (folio_test_large(folio))
return false;
/* Page from ZONE_DEVICE have one extra reference */
- if (is_zone_device_page(page))
+ if (folio_is_zone_device(folio))
extra++;
/* For file back page */
- if (page_mapping(page))
- extra += 1 + page_has_private(page);
+ if (folio_mapping(folio))
+ extra += 1 + folio_has_private(folio);
- if ((page_count(page) - extra) > page_mapcount(page))
+ if ((folio_ref_count(folio) - extra) > folio_mapcount(folio))
return false;
return true;
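
[Not part of the patch: the refcount invariant migrate_vma_check_page() enforces, restated as a standalone sketch. folio_refs_accounted() is a hypothetical name; the folio accessors are exactly those the hunk above introduces.]

#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/page-flags.h>

/*
 * A folio is only safe to migrate if every reference can be accounted
 * for: its page-table mappings, the caller's isolation reference, one
 * extra reference for ZONE_DEVICE folios, and the page cache plus any
 * private data for file-backed folios.
 */
static bool folio_refs_accounted(struct folio *folio)
{
	int extra = 1;			/* caller's isolation reference */

	if (folio_test_large(folio))	/* large folios are not handled */
		return false;

	if (folio_is_zone_device(folio))
		extra++;		/* ZONE_DEVICE keeps one extra ref */

	if (folio_mapping(folio))	/* file-backed folio */
		extra += 1 + folio_has_private(folio);

	return folio_ref_count(folio) - extra <= folio_mapcount(folio);
}
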
@@ -690,6 +692,7 @@ static void __migrate_device_pages(unsigned long *src_pfns,
struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
struct page *page = migrate_pfn_to_page(src_pfns[i]);
struct address_space *mapping;
+ struct folio *folio;
int r;
if (!newpage) {
@@ -724,15 +727,12 @@ static void __migrate_device_pages(unsigned long *src_pfns,
continue;
}
- mapping = page_mapping(page);
+ folio = page_folio(page);
+ mapping = folio_mapping(folio);
if (is_device_private_page(newpage) ||
is_device_coherent_page(newpage)) {
if (mapping) {
- struct folio *folio;
-
- folio = page_folio(page);
-
/*
* For now only support anonymous memory migrating to
* device private or coherent memory.
@@ -755,11 +755,10 @@ static void __migrate_device_pages(unsigned long *src_pfns,
if (migrate && migrate->fault_page == page)
r = migrate_folio_extra(mapping, page_folio(newpage),
- page_folio(page),
- MIGRATE_SYNC_NO_COPY, 1);
+ folio, MIGRATE_SYNC_NO_COPY, 1);
else
r = migrate_folio(mapping, page_folio(newpage),
- page_folio(page), MIGRATE_SYNC_NO_COPY);
+ folio, MIGRATE_SYNC_NO_COPY);
if (r != MIGRATEPAGE_SUCCESS)
src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
}
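
[Not part of the patch: the call shape the last two hunks converge on once the source folio is resolved a single time. migrate_one_device_folio() is a hypothetical wrapper; migrate_folio() is declared in linux/migrate.h and migrate_folio_extra() in mm/internal.h, so a sketch like this only builds inside mm/.]

#include <linux/migrate.h>
#include "internal.h"

/*
 * Migrate one source folio to its destination. 'is_fault_page' stands
 * in for the migrate->fault_page == page test above; the faulting path
 * holds one extra reference, accounted for via extra_count == 1.
 */
static int migrate_one_device_folio(struct address_space *mapping,
				    struct folio *dst, struct folio *src,
				    bool is_fault_page)
{
	if (is_fault_page)
		return migrate_folio_extra(mapping, dst, src,
					   MIGRATE_SYNC_NO_COPY, 1);
	return migrate_folio(mapping, dst, src, MIGRATE_SYNC_NO_COPY);
}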