author     Matthew Wilcox (Oracle) <willy@infradead.org>    2021-12-24 00:39:05 +0300
committer  Matthew Wilcox (Oracle) <willy@infradead.org>    2022-03-21 19:59:01 +0300
commit     8927f6473e56e32e328ae8ed43736412f7f76a4e (patch)
tree       5ca7fde99f04b289af051cb4cceb01a3143d45cc /mm/vmscan.c
parent     1b7f7e58decccb52d6bc454413e3298f1ab3a9c6 (diff)
download   linux-8927f6473e56e32e328ae8ed43736412f7f76a4e.tar.xz
mm/workingset: Convert workingset_eviction() to take a folio
This removes an assumption that THPs are the only kind of compound
pages and removes a few hidden calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
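As a minimal user-space sketch of the pattern this patch applies (an
illustration only, not kernel source; the struct layouts below are
assumptions that model the real ones): a helper taking a struct page
must re-derive the head page itself via compound_head(), whereas a
helper taking a struct folio receives the head page directly, resolved
once by the caller with page_folio().

#include <stdio.h>

/* Toy model of a compound page: tail pages point at their head. */
struct page {
	struct page *head;	/* NULL when this page is itself the head */
};

/* Toy model of a folio: by construction, always a head page. */
struct folio {
	struct page page;
};

/* Models the kernel's compound_head(): map any tail page to its head. */
static struct page *compound_head(struct page *page)
{
	return page->head ? page->head : page;
}

/* Models page_folio(): perform the head lookup once, up front. */
static struct folio *page_folio(struct page *page)
{
	return (struct folio *)compound_head(page);
}

/* Old style: the lookup is hidden inside every page-based helper. */
static void helper_page(struct page *page)
{
	struct page *head = compound_head(page);	/* repeated work */
	printf("page helper: head at %p\n", (void *)head);
}

/* New style: a folio is the head by definition, so no lookup at all. */
static void helper_folio(struct folio *folio)
{
	printf("folio helper: head at %p\n", (void *)folio);
}

int main(void)
{
	struct page head = { .head = NULL };
	struct page tail = { .head = &head };

	helper_page(&tail);			/* resolves the head internally */
	helper_folio(page_folio(&tail));	/* head resolved exactly once */
	return 0;
}

Both calls end up operating on the same head page; the conversion just
moves the lookup to a single, visible page_folio() call at the boundary,
which is what the hunks below do for __remove_mapping().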
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 55fb6d8e30fd..3b96f6e7d895 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1242,6 +1242,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
 static int __remove_mapping(struct address_space *mapping, struct page *page,
 			    bool reclaimed, struct mem_cgroup *target_memcg)
 {
+	struct folio *folio = page_folio(page);
 	int refcount;
 	void *shadow = NULL;
 
@@ -1289,7 +1290,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		swp_entry_t swap = { .val = page_private(page) };
 		mem_cgroup_swapout(page, swap);
 		if (reclaimed && !mapping_exiting(mapping))
-			shadow = workingset_eviction(page, target_memcg);
+			shadow = workingset_eviction(folio, target_memcg);
 		__delete_from_swap_cache(page, swap, shadow);
 		xa_unlock_irq(&mapping->i_pages);
 		put_swap_page(page, swap);
@@ -1315,8 +1316,8 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 	 */
 	if (reclaimed && page_is_file_lru(page) &&
 	    !mapping_exiting(mapping) && !dax_mapping(mapping))
-		shadow = workingset_eviction(page, target_memcg);
-	__delete_from_page_cache(page, shadow);
+		shadow = workingset_eviction(folio, target_memcg);
+	__filemap_remove_folio(folio, shadow);
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping_shrinkable(mapping))
 		inode_add_lru(mapping->host);
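For reference, the prototypes implied by the call sites in this patch
(parameter names are read off the diff above, not quoted from the
headers, so treat them as assumptions):

void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void __filemap_remove_folio(struct folio *folio, void *shadow);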