Diffstat (limited to 'mm/hugetlb.c'):
 mm/hugetlb.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9b79806be17b..0aee2f3ae15c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1535,6 +1535,13 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 		return;
 
+	/*
+	 * If we don't know which subpages are hwpoisoned, we can't free
+	 * the hugepage, so it's leaked intentionally.
+	 */
+	if (HPageRawHwpUnreliable(page))
+		return;
+
 	if (hugetlb_vmemmap_restore(h, page)) {
 		spin_lock_irq(&hugetlb_lock);
 		/*
@@ -1547,6 +1554,13 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 		return;
 	}
 
+	/*
+	 * Move PageHWPoison flag from head page to the raw error pages,
+	 * which makes any healthy subpages reusable.
+	 */
+	if (unlikely(PageHWPoison(page)))
+		hugetlb_clear_page_hwpoison(page);
+
 	for (i = 0; i < pages_per_huge_page(h);
 	     i++, subpage = mem_map_next(subpage, page, i)) {
 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
@@ -2109,15 +2123,6 @@ retry:
 		 */
 		rc = hugetlb_vmemmap_restore(h, head);
 		if (!rc) {
-			/*
-			 * Move PageHWPoison flag from head page to the raw
-			 * error page, which makes any subpages rather than
-			 * the error page reusable.
-			 */
-			if (PageHWPoison(head) && page != head) {
-				SetPageHWPoison(page);
-				ClearPageHWPoison(head);
-			}
 			update_and_free_page(h, head, false);
 		} else {
 			spin_lock_irq(&hugetlb_lock);
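
Note: the third hunk deletes the open-coded PageHWPoison hand-off (which could only move the flag to a single raw error page), and the first two hunks route that work through hugetlb_clear_page_hwpoison() in __update_and_free_page() instead, so the flag can be propagated to every recorded raw error page before the hugepage is freed. Below is a minimal user-space sketch of that hand-off pattern, assuming a simple linked list of recorded raw error pages; struct page, struct raw_hwp_page, and clear_hugepage_hwpoison() here are simplified illustrative stand-ins, not the kernel's real definitions.

/*
 * Illustrative sketch only -- not the kernel implementation.
 * Models the hand-off this patch centralizes: clear the poison
 * flag on the head page and re-apply it to each recorded raw
 * error subpage, so healthy subpages remain reusable.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page {
	unsigned long pfn;
	bool hwpoison;
};

struct raw_hwp_page {
	struct page *page;
	struct raw_hwp_page *next;	/* the kernel uses an llist */
};

/* Move the poison flag from the head page onto each raw error page. */
static void clear_hugepage_hwpoison(struct page *head,
				    struct raw_hwp_page **list)
{
	struct raw_hwp_page *p = *list;

	while (p) {
		struct raw_hwp_page *next = p->next;

		p->page->hwpoison = true;	/* SetPageHWPoison(p->page) */
		free(p);
		p = next;
	}
	*list = NULL;
	head->hwpoison = false;			/* ClearPageHWPoison(head) */
}

int main(void)
{
	struct page head = { .pfn = 0x1000, .hwpoison = true };
	struct page sub  = { .pfn = 0x1002, .hwpoison = false };
	struct raw_hwp_page *list = malloc(sizeof(*list));

	list->page = &sub;
	list->next = NULL;
	clear_hugepage_hwpoison(&head, &list);
	/* prints "head poisoned: 0, subpage poisoned: 1" */
	printf("head poisoned: %d, subpage poisoned: %d\n",
	       head.hwpoison, sub.hwpoison);
	return 0;
}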