author		Andrew Morton <akpm@linux-foundation.org>	2011-07-26 04:12:16 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-26 07:57:09 +0400
commit		6ac47520063b230641a64062b8a229201cd0a3a8 (patch)
tree		e26a25226f980a50468f001bbd3243d74d0d9768 /mm
parent		32f84528fbb5177275193a3311be8756f0cbd62c (diff)
download	linux-6ac47520063b230641a64062b8a229201cd0a3a8.tar.xz
mm/memory.c: remove ZAP_BLOCK_SIZE
ZAP_BLOCK_SIZE became unused in the preemptible-mmu_gather work ("mm: Remove i_mmap_lock lockbreak"). So zap it.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
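For context, the doc comment deleted below describes why ZAP_BLOCK_SIZE existed: pages were zapped in fixed-size byte chunks so locks could be dropped and the CPU yielded between chunks. The following is a minimal, illustrative user-space C sketch of that chunking pattern only; it is not the kernel's unmap_vmas() interface, and PAGE_SIZE, zap_block() and the address range are hypothetical values chosen just to make the loop runnable.

/*
 * Illustrative sketch (not kernel code) of the chunking pattern the
 * removed ZAP_BLOCK_SIZE implemented: walk a large range in fixed-size
 * blocks so that locks can be released and the CPU rescheduled between
 * blocks.  With the preemptible mmu_gather this break point is no
 * longer needed, which is why the macro became dead code.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)	/* CONFIG_PREEMPT value from the removed #ifdef */

/* Hypothetical stand-in for zapping one block of the range. */
static void zap_block(unsigned long start, unsigned long end)
{
	printf("zap [%#lx, %#lx)\n", start, end);
}

int main(void)
{
	unsigned long start = 0x100000, end = 0x100000 + 40 * PAGE_SIZE;

	while (start < end) {
		unsigned long block_end = start + ZAP_BLOCK_SIZE;

		if (block_end > end)
			block_end = end;
		zap_block(start, block_end);
		/*
		 * The old code could drop locks and reschedule here
		 * between blocks; the preemptible mmu_gather removed
		 * the need for this per-block break point.
		 */
		start = block_end;
	}
	return 0;
}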
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	11
1 file changed, 0 insertions, 11 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 9b8a01d941cb..a58bbebb3070 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1290,13 +1290,6 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
return addr;
}
-#ifdef CONFIG_PREEMPT
-# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
-#else
-/* No preempt: go for improved straight-line efficiency */
-# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
-#endif
-
/**
* unmap_vmas - unmap a range of memory covered by a list of vma's
* @tlb: address of the caller's struct mmu_gather
@@ -1310,10 +1303,6 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
*
* Unmap all pages in the vma list.
*
- * We aim to not hold locks for too long (for scheduling latency reasons).
- * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to
- * return the ending mmu_gather to the caller.
- *
* Only addresses between `start' and `end' will be unmapped.
*
* The VMA list must be sorted in ascending virtual address order.