author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2013-11-14 14:31:53 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-15 09:32:20 +0900
commit	ea1e7ed33708c7a760419ff9ded0a6cb90586a50 (patch)
tree	553c0c4024639bf2b1706d144fd6bb8ada63861e
parent	539edb5846c740d78a8b6c2e43a99ca4323df68f (diff)
download	op-kernel-dev-ea1e7ed33708c7a760419ff9ded0a6cb90586a50.zip
	op-kernel-dev-ea1e7ed33708c7a760419ff9ded0a6cb90586a50.tar.gz
mm: create a separate slab for page->ptl allocation
If DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC are enabled, spinlock_t on x86_64 is 72 bytes. For page->ptl it will be allocated from the kmalloc-96 slab, so we lose 24 bytes on each. An average system can easily allocate a few tens of thousands of page->ptl, so the overhead is significant.

Let's create a separate slab for page->ptl allocation to solve this.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
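For scale (a back-of-the-envelope figure, not from the commit message): at, say, 50,000 page-table pages, 24 wasted bytes each come to roughly 1.2 MB. A minimal kernel-style sketch of the two allocation paths; page_ptl_cachep is the cache created in mm/memory.c below, and the kmalloc() call is the path being replaced:

	/*
	 * Sketch only. With DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC on x86_64,
	 * sizeof(spinlock_t) == 72; kmalloc() rounds that up to the
	 * kmalloc-96 size class, wasting 96 - 72 = 24 bytes per object.
	 */
	spinlock_t *ptl;

	ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);   /* 96-byte slot */
	kfree(ptl);

	/* A dedicated cache is sized to the object itself: no per-object waste. */
	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);   /* 72-byte slot */
	kmem_cache_free(page_ptl_cachep, ptl);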
-rw-r--r--	include/linux/mm.h	9
-rw-r--r--	init/main.c	2
-rw-r--r--	mm/memory.c	7
3 files changed, 17 insertions, 1 deletion
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1cedd00..0548eb2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1318,6 +1318,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
#if USE_SPLIT_PTE_PTLOCKS
#if BLOATED_SPINLOCKS
+void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);
@@ -1326,6 +1327,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
return page->ptl;
}
#else /* BLOATED_SPINLOCKS */
+static inline void ptlock_cache_init(void) {}
static inline bool ptlock_alloc(struct page *page)
{
return true;
@@ -1378,10 +1380,17 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return &mm->page_table_lock;
}
+static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */
+static inline void pgtable_init(void)
+{
+ ptlock_cache_init();
+ pgtable_cache_init();
+}
+
static inline bool pgtable_page_ctor(struct page *page)
{
inc_zone_page_state(page, NR_PAGETABLE);
diff --git a/init/main.c b/init/main.c
index 6ad1a53..5f19113 100644
--- a/init/main.c
+++ b/init/main.c
@@ -473,7 +473,7 @@ static void __init mm_init(void)
mem_init();
kmem_cache_init();
percpu_init_late();
- pgtable_cache_init();
+ pgtable_init();
vmalloc_init();
}
diff --git a/mm/memory.c b/mm/memory.c
index 5d9025f..0409e8f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4272,6 +4272,13 @@ void copy_user_huge_page(struct page *dst, struct page *src,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
+static struct kmem_cache *page_ptl_cachep;
+void __init ptlock_cache_init(void)
+{
+ page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
+ SLAB_PANIC, NULL);
+}
+
bool ptlock_alloc(struct page *page)
{
spinlock_t *ptl;
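The capture of this page ends mid-hunk, right after the ptl declaration. A hedged sketch of what the allocate/free pair presumably looks like once it uses the dedicated cache (the remaining hunk body is not shown here; this only follows the pattern set by ptlock_cache_init() above):

	bool ptlock_alloc(struct page *page)
	{
		spinlock_t *ptl;

		/* Take a 72-byte object from the dedicated cache, not kmalloc-96. */
		ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
		if (!ptl)
			return false;
		page->ptl = ptl;
		return true;
	}

	void ptlock_free(struct page *page)
	{
		kmem_cache_free(page_ptl_cachep, page->ptl);
	}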