author	Alex Shi (tencent) <alexs@kernel.org>	2024-04-11 09:17:02 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2024-05-06 03:53:32 +0300
commit	b91f94729d050eec86ae7ef084aa3805146c0a67 (patch)
tree	97736e7a837dde3e1ffd583bc3a9f7f720fed090 /mm/ksm.c
parent	e9016174621112624f957c8138bd3c34692e2e93 (diff)
mm/ksm: add ksm_get_folio
Patch series "transfer page to folio in KSM". This is the first part of the page-to-folio conversion in KSM. Since KSM only stores single pages, we can safely convert the stable tree pages to folios. This patchset reduces ksm.o by 57 kbytes from 2541776 bytes on the latest akpm/mm-stable branch with CONFIG_DEBUG_VM enabled. It passes the KSM tests in LTP and the kernel selftests. Thanks to Matthew Wilcox and David Hildenbrand for their suggestions and comments!

This patch (of 10):

KSM only stores single pages, so we can add a new function, ksm_get_folio, which does what get_ksm_page does but uses folios instead of pages, saving a couple of compound_head() calls. Once all callers have been converted, get_ksm_page will be removed.

Link: https://lkml.kernel.org/r/20240411061713.1847574-1-alexs@kernel.org
Link: https://lkml.kernel.org/r/20240411061713.1847574-2-alexs@kernel.org
Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
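[Editor's note: a minimal user-space sketch of why the folio conversion saves code. The page-based flag helpers must first resolve a possible tail page to its head via compound_head(), while the folio variants know they already hold a head page and read the flags directly. The types and helpers below are simplified stand-ins for the kernel's, not the real definitions.]

#include <assert.h>
#include <stdbool.h>

#define PG_swapcache	(1UL << 0)
#define PG_head		(1UL << 1)

/* Simplified stand-ins for the kernel structures. */
struct page {
	unsigned long flags;
	struct page *head;	/* for tail pages: points at the head page */
};

struct folio {
	struct page page;	/* a folio always refers to a head page */
};

/* Every page-flag test must first find the head page... */
static struct page *compound_head(struct page *page)
{
	return (page->flags & PG_head) ? page : page->head;
}

static bool PageSwapCache(struct page *page)
{
	return compound_head(page)->flags & PG_swapcache;
}

/* ...while the folio variant skips that indirection entirely. */
static bool folio_test_swapcache(struct folio *folio)
{
	return folio->page.flags & PG_swapcache;
}

int main(void)
{
	struct folio f = { .page = { .flags = PG_head | PG_swapcache } };
	struct page tail = { .flags = 0, .head = &f.page };

	assert(PageSwapCache(&tail));		/* resolves the head first */
	assert(folio_test_swapcache(&f));	/* direct flag read */
	return 0;
}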
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	42
1 file changed, 25 insertions(+), 17 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 8c001819cf10..ac126a4c245c 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -897,7 +897,7 @@ enum get_ksm_page_flags {
};
/*
- * get_ksm_page: checks if the page indicated by the stable node
+ * ksm_get_folio: checks if the page indicated by the stable node
* is still its ksm page, despite having held no reference to it.
* In which case we can trust the content of the page, and it
* returns the gotten page; but if the page has now been zapped,
@@ -915,10 +915,10 @@ enum get_ksm_page_flags {
* a page to put something that might look like our key in page->mapping.
* is on its way to being freed; but it is an anomaly to bear in mind.
*/
-static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
+static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
enum get_ksm_page_flags flags)
{
- struct page *page;
+ struct folio *folio;
void *expected_mapping;
unsigned long kpfn;
@@ -926,8 +926,8 @@ static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
PAGE_MAPPING_KSM);
again:
kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
- page = pfn_to_page(kpfn);
- if (READ_ONCE(page->mapping) != expected_mapping)
+ folio = pfn_folio(kpfn);
+ if (READ_ONCE(folio->mapping) != expected_mapping)
goto stale;
/*
@@ -940,41 +940,41 @@ again:
* in folio_migrate_mapping(), it might still be our page,
* in which case it's essential to keep the node.
*/
- while (!get_page_unless_zero(page)) {
+ while (!folio_try_get(folio)) {
/*
* Another check for page->mapping != expected_mapping would
* work here too. We have chosen the !PageSwapCache test to
* optimize the common case, when the page is or is about to
* be freed: PageSwapCache is cleared (under spin_lock_irq)
* in the ref_freeze section of __remove_mapping(); but Anon
- * page->mapping reset to NULL later, in free_pages_prepare().
+ * folio->mapping reset to NULL later, in free_pages_prepare().
*/
- if (!PageSwapCache(page))
+ if (!folio_test_swapcache(folio))
goto stale;
cpu_relax();
}
- if (READ_ONCE(page->mapping) != expected_mapping) {
- put_page(page);
+ if (READ_ONCE(folio->mapping) != expected_mapping) {
+ folio_put(folio);
goto stale;
}
if (flags == GET_KSM_PAGE_TRYLOCK) {
- if (!trylock_page(page)) {
- put_page(page);
+ if (!folio_trylock(folio)) {
+ folio_put(folio);
return ERR_PTR(-EBUSY);
}
} else if (flags == GET_KSM_PAGE_LOCK)
- lock_page(page);
+ folio_lock(folio);
if (flags != GET_KSM_PAGE_NOLOCK) {
- if (READ_ONCE(page->mapping) != expected_mapping) {
- unlock_page(page);
- put_page(page);
+ if (READ_ONCE(folio->mapping) != expected_mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
goto stale;
}
}
- return page;
+ return folio;
stale:
/*
@@ -990,6 +990,14 @@ stale:
return NULL;
}
+static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
+ enum get_ksm_page_flags flags)
+{
+ struct folio *folio = ksm_get_folio(stable_node, flags);
+
+ return &folio->page;
+}
+
/*
* Removing rmap_item from stable or unstable tree.
* This function will clean the information from the stable/unstable tree.
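
[Editor's note: one detail worth noting in the transitional get_ksm_page() wrapper above: it returns &folio->page without checking for a NULL or ERR_PTR(-EBUSY) result from ksm_get_folio(). This is safe because struct folio keeps its embedded struct page at offset zero, so the conversion is a pure type change and NULL or error-encoded pointers reach the caller unchanged. A stand-alone sketch of that invariant, using simplified stand-in types rather than the kernel's real definitions:]

#include <assert.h>
#include <stddef.h>

/* Simplified stand-ins: like the kernel's struct folio, the embedded
 * struct page is the first member. */
struct page {
	unsigned long flags;
};

struct folio {
	struct page page;	/* must stay at offset 0 */
};

int main(void)
{
	struct folio f;

	/* &folio->page is the same address as the folio itself, so the
	 * wrapper's folio-to-page conversion compiles to nothing... */
	assert(offsetof(struct folio, page) == 0);
	assert((void *)&f.page == (void *)&f);

	/* ...and a NULL (or ERR_PTR-encoded) folio pointer therefore
	 * converts to the same NULL/error page pointer, preserving the
	 * old get_ksm_page() return contract for its callers. */
	return 0;
}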