summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatthew Wilcox (Oracle) <willy@infradead.org>2023-07-15 07:23:43 +0300
committerAndrew Morton <akpm@linux-foundation.org>2023-08-21 23:37:27 +0300
commitca54f6d89d60abf3e7dea68c95dfd442eeece212 (patch)
treedb5db1ac60a6e83fcb68e820566b87237632ead0
parentfbcec6a3a09b309900f1ecef8954721d93555abd (diff)
downloadlinux-ca54f6d89d60abf3e7dea68c95dfd442eeece212.tar.xz
zswap: make zswap_load() take a folio
Only convert a few easy parts of this function to use the folio passed in;
convert back to struct page for the majority of it.  Removes three hidden
calls to compound_head().

Link: https://lkml.kernel.org/r/20230715042343.434588-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--include/linux/zswap.h4
-rw-r--r--mm/page_io.c2
-rw-r--r--mm/zswap.c9
3 files changed, 8 insertions, 7 deletions
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 9f318c8bc367..2a60ce39cfde 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -11,7 +11,7 @@ extern atomic_t zswap_stored_pages;
#ifdef CONFIG_ZSWAP
bool zswap_store(struct folio *folio);
-bool zswap_load(struct page *page);
+bool zswap_load(struct folio *folio);
void zswap_invalidate(int type, pgoff_t offset);
void zswap_swapon(int type);
void zswap_swapoff(int type);
@@ -23,7 +23,7 @@ static inline bool zswap_store(struct folio *folio)
return false;
}
-static inline bool zswap_load(struct page *page)
+static inline bool zswap_load(struct folio *folio)
{
return false;
}
diff --git a/mm/page_io.c b/mm/page_io.c
index e3d62c1a0834..fe4c21af23f2 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -513,7 +513,7 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
}
delayacct_swapin_start();
- if (zswap_load(page)) {
+ if (zswap_load(folio)) {
folio_mark_uptodate(folio);
folio_unlock(folio);
} else if (data_race(sis->flags & SWP_FS_OPS)) {
diff --git a/mm/zswap.c b/mm/zswap.c
index 9df33298f2dc..7cc4a2baa713 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1405,11 +1405,12 @@ shrink:
goto reject;
}
-bool zswap_load(struct page *page)
+bool zswap_load(struct folio *folio)
{
- swp_entry_t swp = { .val = page_private(page), };
+ swp_entry_t swp = folio_swap_entry(folio);
int type = swp_type(swp);
pgoff_t offset = swp_offset(swp);
+ struct page *page = &folio->page;
struct zswap_tree *tree = zswap_trees[type];
struct zswap_entry *entry;
struct scatterlist input, output;
@@ -1419,7 +1420,7 @@ bool zswap_load(struct page *page)
unsigned int dlen;
bool ret;
- VM_WARN_ON_ONCE(!PageLocked(page));
+ VM_WARN_ON_ONCE(!folio_test_locked(folio));
/* find */
spin_lock(&tree->lock);
@@ -1481,7 +1482,7 @@ freeentry:
spin_lock(&tree->lock);
if (ret && zswap_exclusive_loads_enabled) {
zswap_invalidate_entry(tree, entry);
- SetPageDirty(page);
+ folio_mark_dirty(folio);
} else if (entry->length) {
spin_lock(&entry->pool->lru_lock);
list_move(&entry->lru, &entry->pool->lru);