author     Yang Shi <yang.shi@linux.alibaba.com>            2020-06-04 02:03:37 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-06-04 06:09:49 +0300
commit     67e4eb076840c7d199ebae0a8825c89a5b07d162 (patch)
tree       2cc80a3f7a0a59b7d27eb50a53d5139722077b46 /mm/huge_memory.c
parent     885902531586d5a20a74099c1357bfdc982befe3 (diff)
mm: thp: don't need to drain lru cache when splitting and mlocking THP
Since commit 8f182270dfec ("mm/swap.c: flush lru pvecs on compound page arrival"), THP would not stay in pagevec anymore. So the optimization made by commit d965432234db ("thp: increase split_huge_page() success rate"), which tries to unpin munlocked THPs from the pagevec by draining it, doesn't make sense anymore.

Draining the lru cache before isolating a THP in the mlock path is also unnecessary. Commit b676b293fb48 ("mm, thp: fix mapped pages avoiding unevictable list on mlock") added it, and commit 9a73f61bdb8a ("thp, mlock: do not mlock PTE-mapped file huge pages") accidentally carried it over after the above optimization went in.

Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Link: http://lkml.kernel.org/r/1585946493-7531-1-git-send-email-yang.shi@linux.alibaba.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
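[Editor's note: background for the first claim above. Since commit 8f182270dfec, the LRU-add path flushes the per-CPU pagevec as soon as a compound page arrives, so a THP never sits there holding an extra reference. A minimal sketch of that logic, simplified from the mm/swap.c of that era (not verbatim kernel source):]

	/* Sketch of the lru-add path after commit 8f182270dfec (simplified).
	 * A compound page (e.g. a THP) forces an immediate flush of the
	 * per-CPU pagevec, so no THP can linger there holding an extra
	 * reference that a later split would have to drain away.
	 */
	static void __lru_cache_add(struct page *page)
	{
		struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			__pagevec_lru_add(pvec);
		put_cpu_var(lru_add_pvec);
	}

[With this in place, draining the pagevec before a split or mlock can never find a THP to unpin, which is exactly why the calls below are removed.]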
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  7
1 file changed, 0 insertions(+), 7 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6df182a18d2c..fb357f02046a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1378,7 +1378,6 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 			goto skip_mlock;
 		if (!trylock_page(page))
 			goto skip_mlock;
-		lru_add_drain();
 		if (page->mapping && !PageDoubleMap(page))
 			mlock_vma_page(page);
 		unlock_page(page);
@@ -2582,7 +2581,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	struct anon_vma *anon_vma = NULL;
 	struct address_space *mapping = NULL;
 	int count, mapcount, extra_pins, ret;
-	bool mlocked;
 	unsigned long flags;
 	pgoff_t end;
@@ -2641,14 +2639,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		goto out_unlock;
 	}

-	mlocked = PageMlocked(head);
 	unmap_page(head);
 	VM_BUG_ON_PAGE(compound_mapcount(head), head);

-	/* Make sure the page is not on per-CPU pagevec as it takes pin */
-	if (mlocked)
-		lru_add_drain();
-
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irqsave(&pgdata->lru_lock, flags);
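
[Editor's note: for context on why the drain ever helped, split_huge_page_to_list() only freezes the page when its reference count is fully explained by its mappings plus the known extra pins. Roughly the check from the mm/huge_memory.c of this era (a trimmed sketch, not guaranteed verbatim):]

	/* Roughly the accounting split_huge_page_to_list() relies on: the
	 * split proceeds only when every reference on the page is explained
	 * by its mappings plus the known extra pins (page cache/swap cache).
	 */
	bool can_split_huge_page(struct page *page, int *pextra_pins)
	{
		int extra_pins;

		/* Additional pins from the page cache or swap cache */
		if (PageAnon(page))
			extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
		else
			extra_pins = HPAGE_PMD_NR;
		if (pextra_pins)
			*pextra_pins = extra_pins;
		return total_mapcount(page) == page_count(page) - extra_pins - 1;
	}

[A THP still parked in a per-CPU pagevec holds one unaccounted reference, so this equality fails and the split is aborted; once commit 8f182270dfec guaranteed that compound pages are flushed on arrival, that failure mode disappeared along with the need to drain.]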