 include/linux/ksm.h |  6
 mm/ksm.c            | 11
 2 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 899a314bc487..98878107244f 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -26,6 +26,12 @@ int ksm_disable(struct mm_struct *mm);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
+/*
+ * To identify zeropages that were mapped by KSM, we reuse the dirty bit
+ * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
+ * deduplicating memory.
+ */
+#define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
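For illustration only (this is not something the patch adds): once is_ksm_zero_pte() exists, a page-table walk can recognize KSM-placed zero pages without any struct page lookup, because the marker lives entirely in the PTE. A minimal sketch using the standard mm_walk_ops pte_entry callback; the names count_ksm_zero_pte and ksm_zero_walk_ops are hypothetical, only pte_present() and the new helper are relied on:

/* Hypothetical pte_entry callback, not part of this patch. */
static int count_ksm_zero_pte(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	/* A present zero-pfn PTE with the dirty bit set was placed by KSM. */
	if (pte_present(*pte) && is_ksm_zero_pte(*pte))
		(*count)++;
	return 0;
}

static const struct mm_walk_ops ksm_zero_walk_ops = {
	.pte_entry	= count_ksm_zero_pte,
};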
diff --git a/mm/ksm.c b/mm/ksm.c
index ba266359da55..99519e22a761 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -448,7 +448,8 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex
if (is_migration_entry(entry))
page = pfn_swap_entry_to_page(entry);
}
- ret = page && PageKsm(page);
+ /* return 1 if the page is a normal ksm page or a KSM-placed zero page */
+ ret = (page && PageKsm(page)) || is_ksm_zero_pte(*pte);
pte_unmap_unlock(pte, ptl);
return ret;
}
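Continuing the hypothetical sketch above, such a callback would be driven by the standard page-table walker; mm and vma are assumed to come from the caller, and walk_page_range() requires mmap_lock to be held:

/* Hypothetical caller, not part of this patch. */
static unsigned long count_vma_ksm_zero_pages(struct mm_struct *mm,
					      struct vm_area_struct *vma)
{
	unsigned long nr_ksm_zero = 0;

	mmap_read_lock(mm);
	walk_page_range(mm, vma->vm_start, vma->vm_end,
			&ksm_zero_walk_ops, &nr_ksm_zero);
	mmap_read_unlock(mm);
	return nr_ksm_zero;
}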
@@ -1222,8 +1223,12 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
page_add_anon_rmap(kpage, vma, addr, RMAP_NONE);
newpte = mk_pte(kpage, vma->vm_page_prot);
} else {
- newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
- vma->vm_page_prot));
+ /*
+ * Use pte_mkdirty to mark the zero page mapped by KSM, and then
+ * we can easily track all KSM-placed zero pages by checking if
+ * the dirty bit in the zero page's PTE is set.
+ */
+ newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
/*
* We're replacing an anonymous page with a zero page, which is
* not anonymous. We need to do proper accounting otherwise we