author    Longlong Xia <xialonglong1@huawei.com>    2023-04-14 05:17:41 +0300
committer Andrew Morton <akpm@linux-foundation.org> 2023-04-19 02:53:52 +0300
commit    4248d0083ec5817eebfb916c54950d100b3468ee (patch)
tree      b0f64bae03987593a20444657b0afd07471bf6b2 /mm/ksm.c
parent    4f775086a6eee07c6ae4be4734d736e13b537351 (diff)
download  linux-4248d0083ec5817eebfb916c54950d100b3468ee.tar.xz
mm: ksm: support hwpoison for ksm page
hwpoison_user_mappings() is updated to support ksm pages, and collect_procs_ksm() is added to collect the affected processes when a memory error hits a ksm page. It differs from collect_procs_anon() in that it must also traverse the rmap_item list on the stable node of the ksm page. In addition, add_to_kill_ksm() is added to handle ksm pages, and task_in_to_kill_list() is added to avoid adding the same task to the to_kill list twice: when the stable node's list is scanned, a process whose pages were all merged into the same ksm page would otherwise be added once per merged page.

Link: https://lkml.kernel.org/r/20230414021741.2597273-3-xialonglong1@huawei.com
Signed-off-by: Longlong Xia <xialonglong1@huawei.com>
Tested-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
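The helpers add_to_kill_ksm() and task_in_to_kill_list() named above live in mm/memory-failure.c and are therefore outside this diffstat. A minimal sketch of the duplicate check they describe, assuming the existing struct to_kill list entry (with its nd list node and tsk field) and the __add_to_kill() helper in mm/memory-failure.c; the bodies here are inferred from the commit message, not taken from this hunk:

/*
 * Sketch only: the real definitions are in mm/memory-failure.c,
 * outside this diff.
 */
static bool task_in_to_kill_list(struct list_head *to_kill,
				 struct task_struct *tsk)
{
	struct to_kill *tk;

	/* One entry per task suffices: every rmap_item of the ksm
	 * page that belongs to the same mm kills the same task. */
	list_for_each_entry(tk, to_kill, nd) {
		if (tk->tsk == tsk)
			return true;
	}
	return false;
}

void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
		     struct vm_area_struct *vma,
		     struct list_head *to_kill, unsigned long addr)
{
	/* Skip tasks already collected from an earlier rmap_item. */
	if (!task_in_to_kill_list(to_kill, tsk))
		__add_to_kill(tsk, p, vma, to_kill, addr);
}

The check matters because, unlike the anon case, one stable node can carry many rmap_items pointing into the same mm, so the tasklist walk in collect_procs_ksm() below may visit a task once per merged page.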
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	45
1 file changed, 45 insertions(+), 0 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 290a3eb6d8de..37c63310bc4e 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2738,6 +2738,51 @@ again:
goto again;
}
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Collect processes when the error hits a ksm page.
+ */
+void collect_procs_ksm(struct page *page, struct list_head *to_kill,
+ int force_early)
+{
+ struct ksm_stable_node *stable_node;
+ struct ksm_rmap_item *rmap_item;
+ struct folio *folio = page_folio(page);
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+
+ stable_node = folio_stable_node(folio);
+ if (!stable_node)
+ return;
+ hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
+ struct anon_vma *av = rmap_item->anon_vma;
+
+ anon_vma_lock_read(av);
+ read_lock(&tasklist_lock);
+ for_each_process(tsk) {
+ struct anon_vma_chain *vmac;
+ unsigned long addr;
+ struct task_struct *t =
+ task_early_kill(tsk, force_early);
+ if (!t)
+ continue;
+ anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0,
+ ULONG_MAX)
+ {
+ vma = vmac->vma;
+ if (vma->vm_mm == t->mm) {
+ addr = rmap_item->address & PAGE_MASK;
+ add_to_kill_ksm(t, page, vma, to_kill,
+ addr);
+ }
+ }
+ }
+ read_unlock(&tasklist_lock);
+ anon_vma_unlock_read(av);
+ }
+}
+#endif
+
#ifdef CONFIG_MIGRATION
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
{
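For context, the new walker only becomes reachable once the memory-failure path knows to call it. A plausible sketch of that dispatch in collect_procs() in mm/memory-failure.c, shown under the assumption that ksm pages are routed the same way collect_procs_anon()/collect_procs_file() already handle anon and file pages (the actual change is outside this diff):

/*
 * Sketch only: PageKsm() pages store a stable-node pointer in
 * page->mapping rather than an anon_vma, so they need the
 * stable-node walk implemented in collect_procs_ksm() above.
 */
static void collect_procs(struct page *page, struct list_head *tokill,
			  int force_early)
{
	if (!page->mapping)
		return;
	if (PageKsm(page))
		collect_procs_ksm(page, tokill, force_early);
	else if (PageAnon(page))
		collect_procs_anon(page, tokill, force_early);
	else
		collect_procs_file(page, tokill, force_early);
}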