author     Izik Eidus <ieidus@redhat.com>                  2009-09-24 02:56:04 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-24 18:20:56 +0400
commit     2c6854fdadf940678fd54779b778f6faafb870bb (patch)
tree       0ed3efb3651813593e38e7976d1201a738b300a6 /mm/ksm.c
parent     d2b5ec3aa0784335f031239e71fb50924cac9e0d (diff)
download   linux-2c6854fdadf940678fd54779b778f6faafb870bb.tar.xz
ksm: change default values to better fit into mainline kernel
Now that ksm is in mainline it is better to change the default values to
better fit most users.

This patch changes the ksm default values to be:

  ksm_thread_pages_to_scan = 100 (instead of 200)
  ksm_thread_sleep_millisecs = 20 (unchanged)
  ksm_run = KSM_RUN_STOP (instead of KSM_RUN_MERGE - meaning ksm is disabled by default)
  ksm_max_kernel_pages = nr_free_buffer_pages() / 4 (instead of a fixed 2000)

The important aspect of this patch is that it disables ksm by default and
sets the limit on the number of unswappable kernel pages ksm may allocate
to a reasonable, memory-dependent value.

Signed-off-by: Izik Eidus <ieidus@redhat.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
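For context (not part of the commit itself): the defaults above are exposed through the KSM sysfs knobs under /sys/kernel/mm/ksm/ (run, pages_to_scan, sleep_millisecs, max_kernel_pages). The following minimal userspace sketch simply reads those files to confirm what a booted kernel ended up with; it is purely illustrative and assumes the standard sysfs layout.

#include <stdio.h>

/* Print one KSM sysfs knob, e.g. /sys/kernel/mm/ksm/run. */
static void show(const char *name)
{
	char path[128], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-20s %s", name, buf);
	fclose(f);
}

int main(void)
{
	/* Expected after this patch: 0, 100, 20, and roughly lowmem/4 pages. */
	show("run");
	show("pages_to_scan");
	show("sleep_millisecs");
	show("max_kernel_pages");
	return 0;
}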
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--  mm/ksm.c | 14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 37cc37325094..f7edac356f46 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mmu_notifier.h>
+#include <linux/swap.h>
#include <linux/ksm.h>
#include <asm/tlbflush.h>
@@ -162,10 +163,10 @@ static unsigned long ksm_pages_unshared;
static unsigned long ksm_rmap_items;
/* Limit on the number of unswappable pages used */
-static unsigned long ksm_max_kernel_pages = 2000;
+static unsigned long ksm_max_kernel_pages;
/* Number of pages ksmd should scan in one batch */
-static unsigned int ksm_thread_pages_to_scan = 200;
+static unsigned int ksm_thread_pages_to_scan = 100;
/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;
@@ -173,7 +174,7 @@ static unsigned int ksm_thread_sleep_millisecs = 20;
#define KSM_RUN_STOP 0
#define KSM_RUN_MERGE 1
#define KSM_RUN_UNMERGE 2
-static unsigned int ksm_run = KSM_RUN_MERGE;
+static unsigned int ksm_run = KSM_RUN_STOP;
static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
@@ -183,6 +184,11 @@ static DEFINE_SPINLOCK(ksm_mmlist_lock);
sizeof(struct __struct), __alignof__(struct __struct),\
(__flags), NULL)
+static void __init ksm_init_max_kernel_pages(void)
+{
+ ksm_max_kernel_pages = nr_free_buffer_pages() / 4;
+}
+
static int __init ksm_slab_init(void)
{
rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
@@ -1667,6 +1673,8 @@ static int __init ksm_init(void)
struct task_struct *ksm_thread;
int err;
+ ksm_init_max_kernel_pages();
+
err = ksm_slab_init();
if (err)
goto out;
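Usage note, again outside the commit: because ksm_run now defaults to KSM_RUN_STOP, merging has to be switched on explicitly at runtime, e.g. "echo 1 > /sys/kernel/mm/ksm/run" as root. A small illustrative C equivalent of that one-liner, assuming the same sysfs path:

#include <stdio.h>

int main(void)
{
	/* 1 corresponds to KSM_RUN_MERGE: ask ksmd to start scanning and merging. */
	FILE *f = fopen("/sys/kernel/mm/ksm/run", "w");

	if (!f) {
		perror("/sys/kernel/mm/ksm/run");
		return 1;
	}
	fputs("1\n", f);
	if (fclose(f) != 0) {	/* fclose flushes the write; errors surface here */
		perror("writing to run");
		return 1;
	}
	return 0;
}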