author     xu xin <xu.xin16@zte.com.cn>                 2023-06-13 06:09:38 +0300
committer  Andrew Morton <akpm@linux-foundation.org>   2023-08-18 20:12:10 +0300
commit     6080d19f07043ade61094d0f58b14c05e1694a39 (patch)
tree       69a1aae1ba5e5f8fdc67f7dc9e3f4f89fea44f97 /mm
parent     e2942062e01df85b4692460fe5b48ab0c90fdb95 (diff)
ksm: add ksm zero pages for each process
As the number of KSM zero pages is not included in ksm_merging_pages per
process when use_zero_pages is enabled, it is unclear how many actual
pages are merged by KSM.  To let users accurately estimate their memory
demands when unsharing KSM zero pages, it is necessary to show the KSM
zero pages per process.  In addition, it helps users to know the actual
KSM profit, because KSM-placed zero pages also benefit from KSM.

Since accurate unsharing of KSM-placed zero pages has been achieved,
tracking the merging and unmerging of these pages is no longer
difficult.  As we already have /proc/<pid>/ksm_stat, just add the
'ksm_zero_pages' information to it.

Link: https://lkml.kernel.org/r/20230613030938.185993-1-yang.yang29@zte.com.cn
Signed-off-by: xu xin <xu.xin16@zte.com.cn>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Xiaokai Ran <ran.xiaokai@zte.com.cn>
Reviewed-by: Yang Yang <yang.yang29@zte.com.cn>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Xuexin Jiang <jiang.xuexin@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
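[Editorial note: the new counter is read back through /proc/<pid>/ksm_stat; that part of the change lives under fs/proc/ and is therefore outside this mm/-limited diffstat. Below is a minimal userspace sketch, not part of the patch, for reading the file. It assumes each line of ksm_stat is a "<name> <value>" pair, with the new field printed as "ksm_zero_pages <count>" as named in the commit message.]

/*
 * Sketch (not part of the kernel patch): dump the per-process KSM
 * counters from /proc/<pid>/ksm_stat, including the new
 * "ksm_zero_pages" field. Pass a pid as argv[1], or omit it to
 * inspect the calling process via /proc/self.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
        char path[64], key[64];
        long val;
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%s/ksm_stat",
                 argc > 1 ? argv[1] : "self");
        f = fopen(path, "r");
        if (!f) {
                perror("fopen");
                return EXIT_FAILURE;
        }
        /* Each line is "<name> <value>"; print them all. */
        while (fscanf(f, "%63s %ld", key, &val) == 2)
                printf("%-18s %ld\n", key, val);
        fclose(f);
        return 0;
}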
Diffstat (limited to 'mm')
-rw-r--r--  mm/khugepaged.c  2
-rw-r--r--  mm/ksm.c         1
-rw-r--r--  mm/memory.c      4
3 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 419981dcc889..4b8b8673d5d9 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -710,7 +710,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
 				spin_lock(ptl);
 				ptep_clear(vma->vm_mm, address, _pte);
 				spin_unlock(ptl);
-				ksm_might_unmap_zero_page(pteval);
+				ksm_might_unmap_zero_page(vma->vm_mm, pteval);
 			}
 		} else {
 			src_page = pte_page(pteval);
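[Editorial note: the call sites in this patch now pass the mm_struct, so the per-process counter can be decremented together with the global ksm_zero_pages. The updated helper is defined in include/linux/ksm.h and thus falls outside this mm/-limited diffstat; a sketch consistent with the call sites is shown below, where is_ksm_zero_pte() is the existing test for a dirty-marked zero-page pte placed by KSM.]

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		ksm_zero_pages--;	/* global count of KSM-placed zero pages */
		mm->ksm_zero_pages--;	/* per-process count introduced by this patch */
	}
}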
diff --git a/mm/ksm.c b/mm/ksm.c
index e037d9aad691..e1772081e8cb 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1233,6 +1233,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 		 */
 		newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
 		ksm_zero_pages++;
+		mm->ksm_zero_pages++;
 		/*
 		 * We're replacing an anonymous page with a zero page, which is
 		 * not anonymous. We need to do proper accounting otherwise we
diff --git a/mm/memory.c b/mm/memory.c
index c256da05bb5e..5f863b1a0edc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1434,7 +1434,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			zap_install_uffd_wp_if_needed(vma, addr, pte, details,
 						      ptent);
 			if (unlikely(!page)) {
-				ksm_might_unmap_zero_page(ptent);
+				ksm_might_unmap_zero_page(mm, ptent);
 				continue;
 			}
@@ -3130,7 +3130,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 				inc_mm_counter(mm, MM_ANONPAGES);
 			}
 		} else {
-			ksm_might_unmap_zero_page(vmf->orig_pte);
+			ksm_might_unmap_zero_page(mm, vmf->orig_pte);
 			inc_mm_counter(mm, MM_ANONPAGES);
 		}
 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
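[Editorial note: with the increment in replace_page() and the decrements at the three unmap paths above, mm->ksm_zero_pages tracks the live KSM-placed zero pages of each process, which is what makes the memory-demand estimate in the commit message possible. As a rough worked example, assuming 4 KiB pages: a process whose ksm_stat reports ksm_zero_pages 2560 would need about 2560 * 4096 bytes = 10 MiB of additional memory if all of those pages were unshared at once.]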