author	Joel Fernandes <joelaf@google.com>	2016-12-13 03:44:26 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-13 05:55:08 +0300
commit	763b218ddfaf56761c19923beb7e16656f66ec62 (patch)
tree	d20c1ca842ce466541b1b98ed0fc5589f51a0897 /mm
parent	f9e09977671b618aeb25ddc0d4c9a84d5b5cde9d (diff)
mm: add preempt points into __purge_vmap_area_lazy()
Use cond_resched_lock to avoid holding the vmap_area_lock for a
potentially long time and thus creating bad latencies for various
workloads.

[hch: split from a larger patch by Joel, wrote the crappy changelog]
Link: http://lkml.kernel.org/r/1479474236-4139-11-git-send-email-hch@lst.de
Signed-off-by: Joel Fernandes <joelaf@google.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Jisheng Zhang <jszhang@marvell.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: John Dias <joaodias@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
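For background, cond_resched_lock() is the standard kernel idiom for bounding how long a spinlock is held across a long loop: if a reschedule is due (or, on preemptible kernels, another CPU is spinning on the lock), it drops the lock, yields the CPU, and retakes the lock before the loop continues. Below is a minimal sketch of the pattern this patch adopts; demo_lock, purge_list, struct demo_node and free_one() are hypothetical stand-ins for the real vmap structures, while the llist helpers and cond_resched_lock() are actual kernel APIs:

#include <linux/llist.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

struct demo_node {
	struct llist_node entry;	/* hypothetical; plays the role of vmap_area.purge_list */
};

static DEFINE_SPINLOCK(demo_lock);	/* plays the role of vmap_area_lock */
static LLIST_HEAD(purge_list);		/* nodes queued for deferred freeing */

static void free_one(struct demo_node *n)
{
	/* stand-in for the real per-node teardown (__free_vmap_area) */
}

static void purge_all(void)
{
	/*
	 * Detach the whole pending list up front: the walk then covers
	 * private memory, so it stays valid even while the lock is
	 * dropped inside the loop.
	 */
	struct llist_node *head = llist_del_all(&purge_list);
	struct demo_node *n, *tmp;

	spin_lock(&demo_lock);
	llist_for_each_entry_safe(n, tmp, head, entry) {
		free_one(n);		/* the work that needs demo_lock */
		/*
		 * Preemption point: drop demo_lock and reschedule if
		 * needed, then retake it. Placed inside the loop so no
		 * single pass holds the lock for an unbounded time.
		 */
		cond_resched_lock(&demo_lock);
	}
	spin_unlock(&demo_lock);
}

The detach-then-walk shape matters: llist_for_each_entry_safe() only guards against freeing the current node, not against concurrent list mutation, so dropping the lock mid-walk is safe here only because the list was removed from shared state first, exactly as __purge_vmap_area_lazy() does with valist.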
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmalloc.c	14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d3c1f5ee48b4..a5584384eabc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -628,7 +628,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 	struct llist_node *valist;
 	struct vmap_area *va;
 	struct vmap_area *n_va;
-	int nr = 0;
+	bool do_free = false;
 
 	lockdep_assert_held(&vmap_purge_lock);
 
@@ -638,18 +638,22 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 			start = va->va_start;
 		if (va->va_end > end)
 			end = va->va_end;
-		nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
+		do_free = true;
 	}
 
-	if (!nr)
+	if (!do_free)
 		return false;
 
-	atomic_sub(nr, &vmap_lazy_nr);
 	flush_tlb_kernel_range(start, end);
 
 	spin_lock(&vmap_area_lock);
-	llist_for_each_entry_safe(va, n_va, valist, purge_list)
+	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
+		int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+
 		__free_vmap_area(va);
+		atomic_sub(nr, &vmap_lazy_nr);
+		cond_resched_lock(&vmap_area_lock);
+	}
 	spin_unlock(&vmap_area_lock);
 	return true;
 }
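For reference, a simplified sketch of what the primitive itself does, modeled on kernel/sched/core.c of this era (exact behavior varies by kernel version and preemption config, so treat this as an approximation rather than the verbatim source):

int __cond_resched_lock(spinlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held(lock);

	/* Break the lock if a reschedule is due or the lock is contended. */
	if (spin_needbreak(lock) || resched) {
		spin_unlock(lock);
		if (resched)
			preempt_schedule_common();
		else
			cpu_relax();
		ret = 1;
		spin_lock(lock);
	}
	return ret;
}

This also suggests why the patch moves the atomic_sub() of vmap_lazy_nr into the loop: once the walk can drop vmap_area_lock and reschedule mid-purge, debiting the counter per freed area keeps vmap_lazy_nr accurate at every preemption point, rather than subtracting the whole batch before any area has actually been freed.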