path: root/mm/swapfile.c
author     Huang Ying <ying.huang@intel.com>              2017-09-07 02:22:23 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org> 2017-09-07 03:27:27 +0300
commit     f0eea189e8e969b66e03bac8a7d92888ba267854 (patch)
tree       4af1e27e1cc318c37b1b447ff4a65a2f5048dea7 /mm/swapfile.c
parent     ba3c4ce6def4915093be80585ff69f780630f32f (diff)
download   linux-f0eea189e8e969b66e03bac8a7d92888ba267854.tar.xz
mm, THP, swap: don't allocate huge cluster for file backed swap device
It's hard to write a whole transparent huge page (THP) to a file-backed swap device during swap-out, and file-backed swap devices aren't widely used. So disable huge cluster allocation for file-backed swap devices.

Link: http://lkml.kernel.org/r/20170724051840.2309-5-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Ross Zwisler <ross.zwisler@intel.com> [for brd.c, zram_drv.c, pmem.c]
Cc: Vishal L Verma <vishal.l.verma@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--  mm/swapfile.c  7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2bfbfb87123a..267b1fe41844 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -948,9 +948,10 @@ start_over:
spin_unlock(&si->lock);
goto nextsi;
}
- if (cluster)
- n_ret = swap_alloc_cluster(si, swp_entries);
- else
+ if (cluster) {
+ if (!(si->flags & SWP_FILE))
+ n_ret = swap_alloc_cluster(si, swp_entries);
+ } else
n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
n_goal, swp_entries);
spin_unlock(&si->lock);
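
For context, here is a commented sketch of the resulting allocation decision in get_swap_pages() after this patch. It is a simplified excerpt based on the hunk above, not the full function; the surrounding loop over swap devices, the locking around it, and the later handling of n_ret are assumed from context.

	if (cluster) {
		/*
		 * THP swap: a whole huge cluster is only allocated from
		 * block-backed swap devices.  For a file-backed device
		 * (SWP_FILE set in si->flags), n_ret stays 0 and no
		 * huge-page-sized allocation is attempted here.
		 */
		if (!(si->flags & SWP_FILE))
			n_ret = swap_alloc_cluster(si, swp_entries);
	} else {
		/* Order-0 path: scan the swap map for up to n_goal slots. */
		n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
					    n_goal, swp_entries);
	}
	spin_unlock(&si->lock);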