author    Matthew Wilcox (Oracle) <willy@infradead.org>    2022-09-02 22:46:12 +0300
committer Andrew Morton <akpm@linux-foundation.org>        2022-10-04 00:02:47 +0300
commit    6599591816f522c1cc8ec4eb5cea75738963756a (patch)
tree      c377c91fb62cf809e11bf327392cafb379ddd513 /mm
parent    d4f9565ae598bd6b6ffbd8b4dfbf97a9e339da2d (diff)
download  linux-6599591816f522c1cc8ec4eb5cea75738963756a.tar.xz
memcg: convert mem_cgroup_swapin_charge_page() to mem_cgroup_swapin_charge_folio()
All callers now have a folio, so pass it in here and remove an unnecessary
call to page_folio().

Link: https://lkml.kernel.org/r/20220902194653.1739778-17-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
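In caller terms the conversion is mechanical; a minimal before/after sketch
(the wrapper functions below are hypothetical, shown only to contrast the two
APIs, not part of this patch):

/* Hypothetical illustration only. */

/* Before: callers passed a page and the callee re-derived the folio. */
static int charge_via_page(struct page *page, struct mm_struct *mm,
			   swp_entry_t entry)
{
	return mem_cgroup_swapin_charge_page(page, mm, GFP_KERNEL, entry);
}

/* After: callers already hold a folio, so no page_folio() call is needed. */
static int charge_via_folio(struct folio *folio, struct mm_struct *mm,
			    swp_entry_t entry)
{
	return mem_cgroup_swapin_charge_folio(folio, mm, GFP_KERNEL, entry);
}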
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  13
-rw-r--r--  mm/memory.c       2
-rw-r--r--  mm/swap_state.c   2
3 files changed, 8 insertions, 9 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e804056422db..621b4472c409 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6844,21 +6844,20 @@ int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
}

/**
- * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
- * @page: page to charge
+ * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
+ * @folio: folio to charge.
* @mm: mm context of the victim
* @gfp: reclaim mode
- * @entry: swap entry for which the page is allocated
+ * @entry: swap entry for which the folio is allocated
*
- * This function charges a page allocated for swapin. Please call this before
- * adding the page to the swapcache.
+ * This function charges a folio allocated for swapin. Please call this before
+ * adding the folio to the swapcache.
*
* Returns 0 on success. Otherwise, an error code is returned.
*/
-int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
+int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry)
{
- struct folio *folio = page_folio(page);
struct mem_cgroup *memcg;
unsigned short id;
int ret;
diff --git a/mm/memory.c b/mm/memory.c
index 1e114438f606..b36b177e0ea9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3783,7 +3783,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
__folio_set_locked(folio);
__folio_set_swapbacked(folio);

- if (mem_cgroup_swapin_charge_page(page,
+ if (mem_cgroup_swapin_charge_folio(folio,
vma->vm_mm, GFP_KERNEL,
entry)) {
ret = VM_FAULT_OOM;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ea354efd3735..a7e0438902dd 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -480,7 +480,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
__folio_set_locked(folio);
__folio_set_swapbacked(folio);

- if (mem_cgroup_swapin_charge_page(&folio->page, NULL, gfp_mask, entry))
+ if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
goto fail_unlock;

/* May fail (-ENOMEM) if XArray node allocation failed. */
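
The kernel-doc above fixes the calling contract: charge the folio before it
enters the swapcache. A sketch of a conforming caller, modelled on the
swap_state.c hunk (swapin_alloc_and_charge() and its error handling are
assumptions for illustration, not kernel code):

static struct folio *swapin_alloc_and_charge(swp_entry_t entry, gfp_t gfp)
{
	struct folio *folio = folio_alloc(gfp, 0);	/* order-0 folio */

	if (!folio)
		return NULL;

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	/* Charge first; only then may the folio be added to the swapcache. */
	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp, entry)) {
		folio_unlock(folio);
		folio_put(folio);
		return NULL;
	}
	return folio;
}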