author		Matthew Wilcox (Oracle) <willy@infradead.org>	2024-03-26 23:28:23 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2024-04-26 06:56:19 +0300
commit		e06d03d5590ae1c257b8aa2cfbfe6765e0755c14 (patch)
tree		e1300c67bcfaff343e73595ccb961ea082af64a3 /mm
parent		5beaee54a324ba1fe307e341ec825d5d099f4091 (diff)
download	linux-e06d03d5590ae1c257b8aa2cfbfe6765e0755c14.tar.xz
mm: add pmd_folio()
Convert directly from a pmd to a folio without going through another
representation first.  For now this is just a slightly shorter way to
write it, but it might end up being more efficient later.

Link: https://lkml.kernel.org/r/20240326202833.523759-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
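The pmd_folio() helper itself lands in include/linux/mm.h, which is
outside the 'mm' path filter and therefore absent from the diffstat
below. As a minimal sketch, assuming the helper simply wraps the
open-coded pattern these hunks replace, the definition would be:

	/* Sketch (assumed definition): convert a pmd entry to its backing folio. */
	#define pmd_folio(pmd) page_folio(pmd_page(pmd))

With that in place, call sites shrink from
folio = page_folio(pmd_page(*pmd)); to folio = pmd_folio(*pmd);,
as the hunks below show.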
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	6
-rw-r--r--	mm/madvise.c	2
-rw-r--r--	mm/mempolicy.c	2
-rw-r--r--	mm/mlock.c	2
-rw-r--r--	mm/userfaultfd.c	2
5 files changed, 7 insertions, 7 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5c043c7b5062..712263e3b1f6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1816,7 +1816,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		goto out;
 	}
 
-	folio = pfn_folio(pmd_pfn(orig_pmd));
+	folio = pmd_folio(orig_pmd);
 	/*
 	 * If other processes are mapping this folio, we couldn't discard
 	 * the folio unless they all do MADV_FREE so let's skip the folio.
@@ -2086,7 +2086,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		if (pmd_protnone(*pmd))
 			goto unlock;
 
-		folio = page_folio(pmd_page(*pmd));
+		folio = pmd_folio(*pmd);
 		toptier = node_is_toptier(folio_nid(folio));
 		/*
 		 * Skip scanning top tier node if normal numa
@@ -2663,7 +2663,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	 * It's safe to call pmd_page when folio is set because it's
 	 * guaranteed that pmd is present.
 	 */
-	if (folio && folio != page_folio(pmd_page(*pmd)))
+	if (folio && folio != pmd_folio(*pmd))
 		goto out;
 	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
 }
diff --git a/mm/madvise.c b/mm/madvise.c
index 7625830d6ae9..1f77a51baaac 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -363,7 +363,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 			goto huge_unlock;
 		}
 
-		folio = pfn_folio(pmd_pfn(orig_pmd));
+		folio = pmd_folio(orig_pmd);
 
 		/* Do not interfere with other mappings of this folio */
 		if (folio_likely_mapped_shared(folio))
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 5743028a63a5..aec756ae5637 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -509,7 +509,7 @@ static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
 		qp->nr_failed++;
 		return;
 	}
-	folio = pfn_folio(pmd_pfn(*pmd));
+	folio = pmd_folio(*pmd);
 	if (is_huge_zero_folio(folio)) {
 		walk->action = ACTION_CONTINUE;
 		return;
diff --git a/mm/mlock.c b/mm/mlock.c
index 1ed2f2ab37cd..30b51cdea89d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -378,7 +378,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
 			goto out;
 		if (is_huge_zero_pmd(*pmd))
 			goto out;
-		folio = page_folio(pmd_page(*pmd));
+		folio = pmd_folio(*pmd);
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_folio(folio);
 		else
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index a0ec14553fbe..b70618e8dcd2 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1662,7 +1662,7 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
 			/* Check if we can move the pmd without splitting it. */
 			if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
 			    !pmd_none(dst_pmdval)) {
-				struct folio *folio = pfn_folio(pmd_pfn(*src_pmd));
+				struct folio *folio = pmd_folio(*src_pmd);
 
 				if (!folio || (!is_huge_zero_folio(folio) &&
 					       !PageAnonExclusive(&folio->page))) {