Diffstat (limited to 'mm')

 mm/internal.h   | 12 ------------
 mm/nommu.c      |  4 ++--
 mm/page_alloc.c |  7 -------
 mm/slab.c       |  9 +++++++--
 4 files changed, 10 insertions(+), 22 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index e3042db..7bb3397 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -15,19 +15,7 @@
static inline void set_page_refs(struct page *page, int order)
{
-#ifdef CONFIG_MMU
set_page_count(page, 1);
-#else
- int i;
-
- /*
- * We need to reference all the pages for this order, otherwise if
- * anyone accesses one of the pages with (get/put) it will be freed.
- * - eg: access_process_vm()
- */
- for (i = 0; i < (1 << order); i++)
- set_page_count(page + i, 1);
-#endif /* CONFIG_MMU */
}
static inline void __put_page(struct page *page)
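With nommu allocations now backed by compound pages, only the head page carries a reference count, so set_page_refs() no longer has to touch every tail page by hand. The machinery this leans on is prep_compound_page() in mm/page_alloc.c, which marks each sub-page and points it back at the head. Roughly, as a simplified sketch of the same-era code rather than a verbatim copy:

	static void prep_compound_page(struct page *page, unsigned long order)
	{
		int i;
		int nr_pages = 1 << order;

		page[1].index = order;	/* order stashed in the first tail page */

		for (i = 0; i < nr_pages; i++) {
			struct page *p = page + i;

			SetPageCompound(p);
			/* every sub-page, head included, points at the head */
			set_page_private(p, (unsigned long)page);
		}
	}

get_page()/put_page() check PageCompound() and redirect to page_private(), so a reference taken on any tail page pins the whole allocation, which is exactly the property the deleted loop used to provide manually.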
diff --git a/mm/nommu.c b/mm/nommu.c
index 4951f47..db45efa 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -159,7 +159,7 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
/*
* kmalloc doesn't like __GFP_HIGHMEM for some reason
*/
- return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
+ return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
struct page * vmalloc_to_page(void *addr)
@@ -623,7 +623,7 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
* - note that this may not return a page-aligned address if the object
* we're allocating is smaller than a page
*/
- base = kmalloc(len, GFP_KERNEL);
+ base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
if (!base)
goto enomem;
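Passing __GFP_COMP at both kmalloc() call sites is what makes the simplification above safe: the resulting multi-page blocks are compound, so callers such as access_process_vm() can take and drop references on any page of the block without freeing a tail page out from under the allocation. A hedged usage sketch of the behaviour the flag buys (illustrative only; the 4 * PAGE_SIZE size is arbitrary):

	void *buf = kmalloc(4 * PAGE_SIZE, GFP_KERNEL | __GFP_COMP);

	if (buf) {
		struct page *tail = virt_to_page(buf + PAGE_SIZE);

		get_page(tail);	/* redirected to the compound head's count */
		/* ... block stays live while the reference is held ... */
		put_page(tail);	/* drops the head ref; nothing freed early */
		kfree(buf);
	}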
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7aa0181..e197818 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -422,11 +422,6 @@ static void __free_pages_ok(struct page *page, unsigned int order)
mutex_debug_check_no_locks_freed(page_address(page),
PAGE_SIZE<<order);
-#ifndef CONFIG_MMU
- for (i = 1 ; i < (1 << order) ; ++i)
- __put_page(page + i);
-#endif
-
for (i = 0 ; i < (1 << order) ; ++i)
reserved += free_pages_check(page + i);
if (reserved)
@@ -746,7 +741,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
clear_highpage(page + i);
}
-#ifdef CONFIG_MMU
/*
* split_page takes a non-compound higher-order page, and splits it into
* n (1<<order) sub-pages: page[0..n]
@@ -766,7 +760,6 @@ void split_page(struct page *page, unsigned int order)
set_page_count(page + i, 1);
}
}
-#endif
/*
* Really, prep_compound_page() should be called from __rmqueue_bulk(). But
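Dropping the #ifdef CONFIG_MMU guard makes split_page() available everywhere; with the nommu refcounting special case gone, the one definition is correct on both configurations. Its contract, per the comment in the hunk above, is to take a non-compound higher-order page and give each sub-page its own unit refcount so the sub-pages can be freed independently. A small usage sketch under those assumptions:

	/* allocate four contiguous pages, then make them independent */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (page) {
		split_page(page, 2);
		/* each sub-page may now be released on its own */
		__free_page(page + 1);
		__free_page(page + 3);
		__free_page(page);
		__free_page(page + 2);
	}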
diff --git a/mm/slab.c b/mm/slab.c
index f477acf..ff0ab77 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -590,6 +590,8 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
static inline struct kmem_cache *page_get_cache(struct page *page)
{
+ if (unlikely(PageCompound(page)))
+ page = (struct page *)page_private(page);
return (struct kmem_cache *)page->lru.next;
}
@@ -600,6 +602,8 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
static inline struct slab *page_get_slab(struct page *page)
{
+ if (unlikely(PageCompound(page)))
+ page = (struct page *)page_private(page);
return (struct slab *)page->lru.prev;
}
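Both getters now tolerate being handed a tail page: PageCompound() redirects through page_private() to the head, where the slab code stored the kmem_cache and slab pointers in lru.next and lru.prev. That is what lets kfree() of an object living past the first page of a __GFP_COMP slab still locate its cache. A hedged sketch of that lookup path (cache_for_addr is a hypothetical name, not a kernel symbol):

	static struct kmem_cache *cache_for_addr(const void *obj)
	{
		/* may resolve to a tail page of a compound slab */
		struct page *page = virt_to_page(obj);

		/* the PageCompound redirect inside page_get_cache finds the head */
		return page_get_cache(page);
	}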
@@ -2412,8 +2416,11 @@ static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
struct page *page;
/* Nasty!!!!!! I hope this is OK. */
- i = 1 << cachep->gfporder;
page = virt_to_page(objp);
+
+ i = 1;
+ if (likely(!PageCompound(page)))
+ i <<= cachep->gfporder;
do {
page_set_cache(page, cachep);
page_set_slab(page, slabp);
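Reassembled with its unchanged context, the updated set_slab_attr() reads roughly as follows (trailing lines inferred from the same-era mm/slab.c; a sketch, not the verbatim file):

	static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
				  void *objp)
	{
		int i;
		struct page *page;

		/* Nasty!!!!!! I hope this is OK. */
		page = virt_to_page(objp);

		i = 1;
		if (likely(!PageCompound(page)))
			i <<= cachep->gfporder;	/* non-compound: stamp each page */
		do {
			page_set_cache(page, cachep);
			page_set_slab(page, slabp);
			page++;
		} while (--i);
	}

For a compound (__GFP_COMP) slab only the head page is stamped; tail pages need no attributes of their own because the getters above redirect every lookup to the head.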