 mm/hugetlb.c | 31 ++++++++++++++++++++++++++-----
 1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9a263a1c200e..5425936a4590 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1451,16 +1451,18 @@ static void __free_huge_page(struct page *page)
 
 	if (HPageTemporary(page)) {
 		remove_hugetlb_page(h, page, false);
+		spin_unlock(&hugetlb_lock);
 		update_and_free_page(h, page);
 	} else if (h->surplus_huge_pages_node[nid]) {
 		/* remove the page from active list */
 		remove_hugetlb_page(h, page, true);
+		spin_unlock(&hugetlb_lock);
 		update_and_free_page(h, page);
 	} else {
 		arch_clear_hugepage_flags(page);
 		enqueue_huge_page(h, page);
+		spin_unlock(&hugetlb_lock);
 	}
-	spin_unlock(&hugetlb_lock);
 }
 
 /*
@@ -1741,7 +1743,13 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 				list_entry(h->hugepage_freelists[node].next,
 					  struct page, lru);
 			remove_hugetlb_page(h, page, acct_surplus);
+			/*
+			 * unlock/lock around update_and_free_page is temporary
+			 * and will be removed with subsequent patch.
+			 */
+			spin_unlock(&hugetlb_lock);
 			update_and_free_page(h, page);
+			spin_lock(&hugetlb_lock);
 			ret = 1;
 			break;
 		}
@@ -1810,8 +1818,9 @@ retry:
 		}
 		remove_hugetlb_page(h, page, false);
 		h->max_huge_pages--;
+		spin_unlock(&hugetlb_lock);
 		update_and_free_page(h, head);
-		rc = 0;
+		return 0;
 	}
 out:
 	spin_unlock(&hugetlb_lock);
@@ -2563,22 +2572,34 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
 						nodemask_t *nodes_allowed)
 {
 	int i;
+	struct page *page, *next;
+	LIST_HEAD(page_list);
 
 	if (hstate_is_gigantic(h))
 		return;
 
+	/*
+	 * Collect pages to be freed on a list, and free after dropping lock
+	 */
 	for_each_node_mask(i, *nodes_allowed) {
-		struct page *page, *next;
 		struct list_head *freel = &h->hugepage_freelists[i];
 		list_for_each_entry_safe(page, next, freel, lru) {
 			if (count >= h->nr_huge_pages)
-				return;
+				goto out;
 			if (PageHighMem(page))
 				continue;
 			remove_hugetlb_page(h, page, false);
-			update_and_free_page(h, page);
+			list_add(&page->lru, &page_list);
 		}
 	}
+
+out:
+	spin_unlock(&hugetlb_lock);
+	list_for_each_entry_safe(page, next, &page_list, lru) {
+		update_and_free_page(h, page);
+		cond_resched();
+	}
+	spin_lock(&hugetlb_lock);
 }
 #else
 static inline void try_to_free_low(struct hstate *h, unsigned long count,
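
The try_to_free_low() hunk above shows the pattern the patch moves toward: gather pages on a private list while hugetlb_lock is held, then do the potentially slow update_and_free_page() work only after the lock is dropped. Below is a minimal, self-contained sketch of that pattern; it is not part of the patch, and the names example_lock, example_pool and free_one_page_slow() are hypothetical stand-ins rather than hugetlb code.

/*
 * Sketch only: detach work under a spinlock, do the expensive part after
 * dropping it. free_one_page_slow() is a made-up placeholder standing in
 * for update_and_free_page().
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void free_one_page_slow(struct page *page)
{
	/* Hypothetical stand-in: just return the page to the allocator. */
	__free_page(page);
}

static void free_example_pool(struct list_head *example_pool)
{
	struct page *page, *next;
	LIST_HEAD(page_list);

	spin_lock(&example_lock);
	/* Move everything onto a private list while the lock is held. */
	list_for_each_entry_safe(page, next, example_pool, lru)
		list_move(&page->lru, &page_list);
	spin_unlock(&example_lock);

	/* Slow per-page work runs without the lock, with resched points. */
	list_for_each_entry_safe(page, next, &page_list, lru) {
		list_del(&page->lru);
		free_one_page_slow(page);
		cond_resched();
	}
}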