summaryrefslogtreecommitdiff
path: root/mm/khugepaged.c
diff options
context:
space:
mode:
author: Matthew Wilcox (Oracle) <willy@infradead.org>  2024-04-03 20:18:31 +0300
committer: Andrew Morton <akpm@linux-foundation.org>  2024-04-26 06:56:33 +0300
commit d5ab50b9412c0bba750eef5a34fd2937de1aee55 (patch)
tree 6f2b73b5a97c876e2863e18246de3d5e2278e47e /mm/khugepaged.c
parent 4746f5ce0fa52e21b5fe432970fe9516d1a45ebc (diff)
download linux-d5ab50b9412c0bba750eef5a34fd2937de1aee55.tar.xz
khugepaged: convert alloc_charge_hpage to alloc_charge_folio
Both callers want to deal with a folio, so return a folio from this function.

Link: https://lkml.kernel.org/r/20240403171838.1445826-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/khugepaged.c')
-rw-r--r--  mm/khugepaged.c | 17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index ad16dd8b26a8..2f1dacd65d12 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1045,7 +1045,7 @@ out:
return result;
}
-static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
+static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
struct collapse_control *cc)
{
gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
@@ -1055,7 +1055,7 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
if (!folio) {
- *hpage = NULL;
+ *foliop = NULL;
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
return SCAN_ALLOC_HUGE_PAGE_FAIL;
}
@@ -1063,13 +1063,13 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
count_vm_event(THP_COLLAPSE_ALLOC);
if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
folio_put(folio);
- *hpage = NULL;
+ *foliop = NULL;
return SCAN_CGROUP_CHARGE_FAIL;
}
count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
- *hpage = folio_page(folio, 0);
+ *foliop = folio;
return SCAN_SUCCEED;
}
@@ -1098,7 +1098,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
*/
mmap_read_unlock(mm);
- result = alloc_charge_hpage(&hpage, mm, cc);
+ result = alloc_charge_folio(&folio, mm, cc);
+ hpage = &folio->page;
if (result != SCAN_SUCCEED)
goto out_nolock;
@@ -1204,7 +1205,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
if (unlikely(result != SCAN_SUCCEED))
goto out_up_write;
- folio = page_folio(hpage);
/*
* The smp_wmb() inside __folio_mark_uptodate() ensures the
* copy_huge_page writes become visible before the set_pmd_at()
@@ -1789,7 +1789,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
struct page *hpage;
struct page *page;
struct page *tmp;
- struct folio *folio;
+ struct folio *folio, *new_folio;
pgoff_t index = 0, end = start + HPAGE_PMD_NR;
LIST_HEAD(pagelist);
XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -1800,7 +1800,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
- result = alloc_charge_hpage(&hpage, mm, cc);
+ result = alloc_charge_folio(&new_folio, mm, cc);
+ hpage = &new_folio->page;
if (result != SCAN_SUCCEED)
goto out;