author     Linus Torvalds <torvalds@linux-foundation.org>   2013-11-20 14:41:47 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-11-20 14:41:47 -0800
commit     8b2e9b712f6139df9c754af0d67fecc4bbc88545
tree       4260126ffc11a2e60d92f3f9304bc2ea02516332
parent     b5898cd057001336888b6aff601385b5f5e79b01
Revert "mm: create a separate slab for page->ptl allocation"
This reverts commit ea1e7ed33708c7a760419ff9ded0a6cb90586a50.

Al points out that while the commit *does* actually create a separate slab
for the page->ptl allocation, that slab is never actually used, and the code
continues to use kmalloc/kfree.

Damien Wyart points out that the original patch did have the conversion to
use kmem_cache_alloc/free, so it got lost somewhere on its way to me.

Revert the half-arsed attempt that didn't do anything.  If we really do want
the special slab (remember: this is all relevant just for debug builds, so
it's not necessarily all that critical) we might as well redo the patch
fully.

Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Kirill A Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
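For context, the code being reverted below still allocates page->ptl with
kmalloc()/kfree(); the piece that got lost on its way upstream was the switch
to the dedicated cache. A minimal sketch of what that completed conversion
might look like, reusing the page_ptl_cachep / ptlock_cache_init() names from
the reverted hunks in mm/memory.c (an illustration under those assumptions,
not the actual follow-up patch):

/*
 * Sketch only, not the actual follow-up patch.  This would live in
 * mm/memory.c under the same "#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS"
 * guard (page->ptl is a spinlock_t pointer only in that configuration),
 * with <linux/mm.h> and <linux/slab.h> already included there.
 */
static struct kmem_cache *page_ptl_cachep;

void __init ptlock_cache_init(void)
{
	/* Dedicated slab for the out-of-line page->ptl spinlocks. */
	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
			SLAB_PANIC, NULL);
}

bool ptlock_alloc(struct page *page)
{
	spinlock_t *ptl;

	/* Allocate from the dedicated cache instead of kmalloc(). */
	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
	if (!ptl)
		return false;
	page->ptl = ptl;
	return true;
}

void ptlock_free(struct page *page)
{
	/* Return the lock to the cache instead of kfree(). */
	kmem_cache_free(page_ptl_cachep, page->ptl);
}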
-rw-r--r--  include/linux/mm.h |  9 ---------
-rw-r--r--  init/main.c        |  2 +-
-rw-r--r--  mm/memory.c        |  7 -------
3 files changed, 1 insertion(+), 17 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0548eb2..1cedd00 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1318,7 +1318,6 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 
 #if USE_SPLIT_PTE_PTLOCKS
 #if BLOATED_SPINLOCKS
-void __init ptlock_cache_init(void);
 extern bool ptlock_alloc(struct page *page);
 extern void ptlock_free(struct page *page);
 
@@ -1327,7 +1326,6 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
 	return page->ptl;
 }
 #else /* BLOATED_SPINLOCKS */
-static inline void ptlock_cache_init(void) {}
 static inline bool ptlock_alloc(struct page *page)
 {
 	return true;
@@ -1380,17 +1378,10 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
 	return &mm->page_table_lock;
 }
-static inline void ptlock_cache_init(void) {}
 static inline bool ptlock_init(struct page *page) { return true; }
 static inline void pte_lock_deinit(struct page *page) {}
 #endif /* USE_SPLIT_PTE_PTLOCKS */
 
-static inline void pgtable_init(void)
-{
-	ptlock_cache_init();
-	pgtable_cache_init();
-}
-
 static inline bool pgtable_page_ctor(struct page *page)
 {
 	inc_zone_page_state(page, NR_PAGETABLE);
diff --git a/init/main.c b/init/main.c
index 01573fd..febc511 100644
--- a/init/main.c
+++ b/init/main.c
@@ -476,7 +476,7 @@ static void __init mm_init(void)
 	mem_init();
 	kmem_cache_init();
 	percpu_init_late();
-	pgtable_init();
+	pgtable_cache_init();
 	vmalloc_init();
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 0409e8f..5d9025f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4272,13 +4272,6 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 #if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
-static struct kmem_cache *page_ptl_cachep;
-void __init ptlock_cache_init(void)
-{
-	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
-			SLAB_PANIC, NULL);
-}
-
 bool ptlock_alloc(struct page *page)
 {
 	spinlock_t *ptl;