author	Kairui Song <kasong@tencent.com>	2024-04-15 20:18:54 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2024-04-26 06:56:09 +0300
commit	b2ebcf9d3d5a0108f640d8c8200ece8848045725 (patch)
tree	a300779f990e54b00025255c92a37e66c8a8919f /mm/filemap.c
parent	de60fd8ddeda2b41fbe11df11733838c5f684616 (diff)
mm/filemap: clean up hugetlb exclusion code
__filemap_add_folio() only has two callers: one never passes in a hugetlb
folio and one always does. So move the hugetlb-related cgroup charging out
of it to make the code cleaner.

Link: https://lkml.kernel.org/r/20240415171857.19244-3-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Acked-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
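For illustration, here is a minimal sketch of the two call sites the
message refers to. The hugetlb-side caller name (hugetlb_add_to_page_cache)
and the elided bodies are assumptions based on the commit message, not part
of this patch:

/*
 * Caller 1: regular page cache folios. After this patch it performs the
 * memcg charge itself instead of relying on __filemap_add_folio().
 */
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
		      pgoff_t index, gfp_t gfp)
{
	int ret = mem_cgroup_charge(folio, NULL, gfp);

	if (ret)
		return ret;
	/* ... lock the folio, then call __filemap_add_folio() ... */
}

/*
 * Caller 2: always passes a hugetlb folio. Hugetlb pages are accounted
 * through the hugetlb cgroup controller instead, so no mem_cgroup_charge()
 * is wanted here, before or after this patch.
 */
int hugetlb_add_to_page_cache(struct folio *folio,
			      struct address_space *mapping, pgoff_t idx)
{
	/* ... calls __filemap_add_folio() directly, with no memcg charge ... */
}

With the charge hoisted into the only caller that needs it, the "charged"
bookkeeping and the !huge branch inside the shared helper become dead
weight, which is what the hunks below delete.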
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	21
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 12089c24abfb..17a66ea544e7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -853,20 +853,12 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 {
 	XA_STATE(xas, &mapping->i_pages, index);
 	bool huge = folio_test_hugetlb(folio);
-	bool charged = false;
-	long nr = 1;
+	long nr;
 
 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
 	mapping_set_update(&xas, mapping);
 
-	if (!huge) {
-		int error = mem_cgroup_charge(folio, NULL, gfp);
-		if (error)
-			return error;
-		charged = true;
-	}
-
 	VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
 	xas_set_order(&xas, index, folio_order(folio));
 	nr = folio_nr_pages(folio);
@@ -931,8 +923,6 @@ unlock:
 	trace_mm_filemap_add_to_page_cache(folio);
 	return 0;
 error:
-	if (charged)
-		mem_cgroup_uncharge(folio);
 	folio->mapping = NULL;
 	/* Leave page->index set: truncation relies upon it */
 	folio_put_refs(folio, nr);
@@ -946,11 +936,16 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 	void *shadow = NULL;
 	int ret;
 
+	ret = mem_cgroup_charge(folio, NULL, gfp);
+	if (ret)
+		return ret;
+
 	__folio_set_locked(folio);
 	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		mem_cgroup_uncharge(folio);
 		__folio_clear_locked(folio);
-	else {
+	} else {
 		/*
 		 * The folio might have been evicted from cache only
 		 * recently, in which case it should be activated like
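A note on the resulting ordering in filemap_add_folio(): the charge is
taken before __folio_set_locked(), so the early return on charge failure
touches neither the folio lock nor the mapping, and the caller now undoes
its own charge on failure. Condensed from the hunk above (the activation
handling in the else-branch is elided):

	ret = mem_cgroup_charge(folio, NULL, gfp);
	if (ret)
		return ret;			/* nothing to undo yet */

	__folio_set_locked(folio);
	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
	if (unlikely(ret)) {
		mem_cgroup_uncharge(folio);	/* undo our own charge */
		__folio_clear_locked(folio);
	}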