From c73602ad31cdcf7e6651f43d12f65b5b9b825b6f Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Wed, 7 Oct 2009 16:32:22 -0700
Subject: ksm: more on default values

Adjust the max_kernel_pages default to a quarter of totalram_pages,
instead of nr_free_buffer_pages() / 4: the KSM pages themselves come
from highmem, and even on a 16GB PAE machine, 4GB of KSM pages would
only be pinning 32MB of lowmem with their rmap_items, so no need for
the more obscure calculation (nor for its own special init function).

There is no way for the user to switch KSM on if CONFIG_SYSFS is not
enabled, so in that case default run to KSM_RUN_MERGE.

Update KSM Documentation and Kconfig to reflect the new defaults.

Signed-off-by: Hugh Dickins
Cc: Izik Eidus
Cc: Andrea Arcangeli
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/ksm.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

(limited to 'mm/ksm.c')

diff --git a/mm/ksm.c b/mm/ksm.c
index f7edac3..bef1af4 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -184,11 +184,6 @@ static DEFINE_SPINLOCK(ksm_mmlist_lock);
 		sizeof(struct __struct), __alignof__(struct __struct),\
 		(__flags), NULL)
 
-static void __init ksm_init_max_kernel_pages(void)
-{
-	ksm_max_kernel_pages = nr_free_buffer_pages() / 4;
-}
-
 static int __init ksm_slab_init(void)
 {
 	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
@@ -1673,7 +1668,7 @@ static int __init ksm_init(void)
 	struct task_struct *ksm_thread;
 	int err;
 
-	ksm_init_max_kernel_pages();
+	ksm_max_kernel_pages = totalram_pages / 4;
 
 	err = ksm_slab_init();
 	if (err)
@@ -1697,6 +1692,9 @@ static int __init ksm_init(void)
 		kthread_stop(ksm_thread);
 		goto out_free2;
 	}
+#else
+	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */
+
 #endif /* CONFIG_SYSFS */
 
 	return 0;
--
cgit v1.1

From d178f27fc5150d680d9df865ea9dfe3269cf00a6 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Mon, 9 Nov 2009 15:58:23 +0000
Subject: ksm: cond_resched in unstable tree

KSM needs a cond_resched() for CONFIG_PREEMPT_NONE, in its unbounded
search of the unstable tree.  The stable tree cases already have one,
and originally there was one down inside get_user_pages(); but I
missed it when I converted to follow_page() instead.

Signed-off-by: Hugh Dickins
Acked-by: Izik Eidus
Signed-off-by: Linus Torvalds
---
 mm/ksm.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'mm/ksm.c')

diff --git a/mm/ksm.c b/mm/ksm.c
index bef1af4..5575f86 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1012,6 +1012,7 @@ static struct rmap_item *unstable_tree_search_insert(struct page *page,
 		struct rmap_item *tree_rmap_item;
 		int ret;
 
+		cond_resched();
 		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
 		page2[0] = get_mergeable_page(tree_rmap_item);
 		if (!page2[0])
--
cgit v1.1
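
The 32MB figure in the first commit message follows from simple
arithmetic.  A standalone C sketch of the calculation, assuming 4KB
pages and a 32-byte struct rmap_item on 32-bit PAE (both sizes are
assumptions for illustration, not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		/* Assumed sizes: 4KB pages, 32-byte rmap_item on 32-bit PAE. */
		unsigned long long ksm_bytes = 4ULL << 30;	/* 4GB of KSM pages */
		unsigned long long page_size = 4096;
		unsigned long long rmap_item_size = 32;

		unsigned long long pages = ksm_bytes / page_size;	/* 1048576 pages */
		unsigned long long lowmem = pages * rmap_item_size;	/* their rmap_items */

		/* Prints: 1048576 KSM pages pin ~32 MB of lowmem */
		printf("%llu KSM pages pin ~%llu MB of lowmem\n",
		       pages, lowmem >> 20);
		return 0;
	}

Only the rmap_items live in lowmem; the merged pages themselves can
come from highmem, which is why a quarter of totalram_pages is a safe
default even on a PAE box.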
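
The second commit applies a common kernel pattern: drop a
cond_resched() into an otherwise unbounded loop so that, on
CONFIG_PREEMPT_NONE kernels, the scheduler still gets a chance to
run.  A minimal userspace sketch of the shape of the fix, with
cond_resched() stubbed out (a simplified illustration, not the actual
unstable_tree_search_insert() body):

	#include <stddef.h>

	struct node {
		int key;
		struct node *left, *right;
	};

	/* Stub: in the kernel, cond_resched() yields the CPU if a
	 * reschedule is pending; here it is a no-op placeholder. */
	static void cond_resched(void)
	{
	}

	/* Tree descent of unbounded depth: one cond_resched() per
	 * iteration bounds scheduling latency to a single step. */
	static struct node *tree_search(struct node *root, int key)
	{
		struct node *n = root;

		while (n) {
			cond_resched();
			if (key < n->key)
				n = n->left;
			else if (key > n->key)
				n = n->right;
			else
				return n;
		}
		return NULL;
	}

Placing the call at the top of each iteration, as the patch does,
keeps the time between reschedule points to one comparison step,
which in KSM's case is a full-page memcmp and therefore worth
breaking up.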