author		Alexander Duyck <alexander.h.duyck@linux.intel.com>	2020-04-07 06:05:10 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-04-07 20:43:39 +0300
commit		02cf8719b8cb2b474b37bcbeee4706950b3d1d3d (patch)
tree		85e4fc9ebaba53d9984c347b2bd1e6eb8e143605 /mm/page_reporting.c
parent		b0c504f154718904ae49349147e3b7e6ae91ffdc (diff)
download	linux-02cf8719b8cb2b474b37bcbeee4706950b3d1d3d.tar.xz
mm/page_reporting: rotate reported pages to the tail of the list
Rather than walking over the same pages again and again to get to the pages that have yet to be reported we can save ourselves a significant amount of time by simply rotating the list so that when we have a full list of reported pages the head of the list is pointing to the next non-reported page. Doing this should save us some significant time when processing each free list.

This doesn't gain us much in the standard case as all of the non-reported pages should be near the top of the list already. However in the case of page shuffling this results in a noticeable improvement. Below are the will-it-scale page_fault1 w/ THP numbers for 16 tasks with and without this patch.

Without:
tasks   processes    processes_idle   threads      threads_idle
16      8093776.25   0.17             5393242.00   38.20

With:
tasks   processes    processes_idle   threads      threads_idle
16      8283274.75   0.17             5594261.00   38.15

Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Nitesh Narayan Lal <nitesh@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pankaj Gupta <pagupta@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Wang <wei.w.wang@intel.com>
Cc: Yang Zhang <yang.zhang.wz@gmail.com>
Cc: wei qi <weiqi4@huawei.com>
Link: http://lkml.kernel.org/r/20200211224708.29318.16862.stgit@localhost.localdomain
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
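To make the rotation concrete, here is a small standalone userspace sketch (not part of the patch and not the kernel's <linux/list.h> code) that mimics the effect of list_rotate_to_front() on a circular doubly-linked list: the chosen entry becomes the new first element, so the already-reported entries that sat at the head wrap around to the tail. The struct node type and the rotate_to_front() helper are illustrative stand-ins for struct page/list_head and the kernel helper.

/*
 * Standalone illustration only -- NOT kernel code and not part of this
 * patch.  A minimal circular doubly-linked list shows what rotating the
 * first non-reported entry to the front does: the reported entries that
 * used to sit at the head end up at the tail.
 */
#include <stdio.h>

struct node {
	struct node *prev, *next;
	int id;        /* stand-in for a page */
	int reported;  /* stand-in for PageReported() */
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_del(struct node *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* insert @entry immediately before @pos */
static void list_add_tail(struct node *entry, struct node *pos)
{
	entry->prev = pos->prev;
	entry->next = pos;
	pos->prev->next = entry;
	pos->prev = entry;
}

/* mimic list_rotate_to_front(): make @entry the first element of @head */
static void rotate_to_front(struct node *entry, struct node *head)
{
	/* the kernel helper effectively re-inserts the head sentinel
	 * just before @entry */
	list_del(head);
	list_add_tail(head, entry);
}

int main(void)
{
	struct node head, pages[5];
	struct node *p;
	int i;

	list_init(&head);
	for (i = 0; i < 5; i++) {
		pages[i].id = i;
		pages[i].reported = (i < 3);	/* ids 0-2 already reported */
		list_add_tail(&pages[i], &head);
	}

	/* walk the list the way the reporting loop would, skipping
	 * reported entries, and remember the first non-reported one */
	for (p = head.next; p != &head; p = p->next)
		if (!p->reported)
			break;

	if (p != &head)
		rotate_to_front(p, &head);

	/* prints: 3 4 0 1 2 -- reported entries rotated to the tail */
	for (p = head.next; p != &head; p = p->next)
		printf("%d%s", p->id, p->next == &head ? "\n" : " ");

	return 0;
}

Built with any C compiler, this prints "3 4 0 1 2": the list head now points at the next non-reported entry, so a later pass does not have to re-walk the reported ones first.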
Diffstat (limited to 'mm/page_reporting.c')
-rw-r--r--	mm/page_reporting.c	30
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/mm/page_reporting.c b/mm/page_reporting.c
index 1047c6872d4f..6885e74c2367 100644
--- a/mm/page_reporting.c
+++ b/mm/page_reporting.c
@@ -131,17 +131,27 @@ page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
 		if (PageReported(page))
 			continue;
 
-		/* Attempt to pull page from list */
-		if (!__isolate_free_page(page, order))
-			break;
+		/* Attempt to pull page from list and place in scatterlist */
+		if (*offset) {
+			if (!__isolate_free_page(page, order)) {
+				next = page;
+				break;
+			}
 
-		/* Add page to scatter list */
-		--(*offset);
-		sg_set_page(&sgl[*offset], page, page_len, 0);
+			/* Add page to scatter list */
+			--(*offset);
+			sg_set_page(&sgl[*offset], page, page_len, 0);
 
-		/* If scatterlist isn't full grab more pages */
-		if (*offset)
 			continue;
+		}
+
+		/*
+		 * Make the first non-processed page in the free list
+		 * the new head of the free list before we release the
+		 * zone lock.
+		 */
+		if (&page->lru != list && !list_is_first(&page->lru, list))
+			list_rotate_to_front(&page->lru, list);
 
 		/* release lock before waiting on report processing */
 		spin_unlock_irq(&zone->lock);
@@ -169,6 +179,10 @@ page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
 			break;
 	}
 
+	/* Rotate any leftover pages to the head of the freelist */
+	if (&next->lru != list && !list_is_first(&next->lru, list))
+		list_rotate_to_front(&next->lru, list);
+
 	spin_unlock_irq(&zone->lock);
 
 	return err;
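For a rough sense of scale, the will-it-scale page_fault1 figures quoted in the commit message work out to roughly a 2.3% gain for processes and 3.7% for threads. The snippet below is just that arithmetic on the quoted numbers, not part of the patch or its test setup.

/* Relative change implied by the quoted will-it-scale numbers; plain
 * arithmetic on the figures from the commit message, nothing more. */
#include <stdio.h>

int main(void)
{
	const double proc_before = 8093776.25, proc_after = 8283274.75;
	const double thr_before  = 5393242.00, thr_after  = 5594261.00;

	printf("processes: %+.1f%%\n",
	       100.0 * (proc_after - proc_before) / proc_before);	/* ~ +2.3% */
	printf("threads:   %+.1f%%\n",
	       100.0 * (thr_after - thr_before) / thr_before);		/* ~ +3.7% */
	return 0;
}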