author     Miaohe Lin <linmiaohe@huawei.com>          2022-08-18 16:00:16 +0300
committer  Andrew Morton <akpm@linux-foundation.org>  2022-09-12 06:25:58 +0300
commit     e9ff3ba7ff10490a92792faf1d3573a24fc6e5c9 (patch)
tree       2777337b97a3175abe6d4cc410b0411b179b0aa1 /mm/memory-failure.c
parent     0792a4a6195a6d67a4ead2554da393cbd8dcdf5a (diff)
download   linux-e9ff3ba7ff10490a92792faf1d3573a24fc6e5c9.tar.xz
mm, hwpoison: avoid trying to unpoison reserved page
For reserved pages, the HWPoison flag will be set without increasing the page refcnt. So we shouldn't even try to unpoison these pages and thus unexpectedly decrease the page refcnt. Add a PageReserved() check to filter this case out and remove the now-unneeded zero page check below (the zero page is reserved).

Link: https://lkml.kernel.org/r/20220818130016.45313-7-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r--  mm/memory-failure.c  4
1 file changed, 2 insertions, 2 deletions
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 3b8e7937ce75..8156ef0983a3 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2351,7 +2351,7 @@ int unpoison_memory(unsigned long pfn)
goto unlock_mutex;
}
- if (PageSlab(page) || PageTable(page))
+ if (PageSlab(page) || PageTable(page) || PageReserved(page))
goto unlock_mutex;
ret = get_hwpoison_page(p, MF_UNPOISON);
@@ -2382,7 +2382,7 @@ int unpoison_memory(unsigned long pfn)
freeit = !!TestClearPageHWPoison(p);
put_page(page);
- if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1)) {
+ if (freeit) {
put_page(page);
ret = 0;
}
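
For readability, here is a minimal sketch of how the affected part of unpoison_memory() reads with both hunks applied. It is reconstructed only from the hunk context above; the lines between the two hunks are elided, and the comments are editorial rather than quoted from the tree.

	/* Sketch: unpoison_memory() after this patch (hunks combined, middle elided). */

	/*
	 * Reserved pages (including the zero page) get PageHWPoison set
	 * without a refcount being taken, so refuse to unpoison them,
	 * just like slab and page-table pages.
	 */
	if (PageSlab(page) || PageTable(page) || PageReserved(page))
		goto unlock_mutex;

	ret = get_hwpoison_page(p, MF_UNPOISON);
	/* ... */

	freeit = !!TestClearPageHWPoison(p);
	put_page(page);
	/*
	 * With reserved pages filtered out above, the zero-page special
	 * case is no longer needed: any page that really was poisoned
	 * holds an extra reference that can be dropped unconditionally.
	 */
	if (freeit) {
		put_page(page);
		ret = 0;
	}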