Diffstat (limited to 'mm')
 mm/internal.h   |  2
 mm/page_alloc.c | 29
 mm/slab.c       |  6
 mm/slub.c       | 19
 mm/swap.c       |  2
5 files changed, 28 insertions, 30 deletions
diff --git a/mm/internal.h b/mm/internal.h
index d527b80..a3110c0 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -24,7 +24,7 @@ static inline void set_page_count(struct page *page, int v)
  */
 static inline void set_page_refcounted(struct page *page)
 {
-	VM_BUG_ON(PageCompound(page) && page_private(page) != (unsigned long)page);
+	VM_BUG_ON(PageCompound(page) && PageTail(page));
 	VM_BUG_ON(atomic_read(&page->_count));
 	set_page_count(page, 1);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 542fc08..fc241fe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -225,7 +225,7 @@ static void bad_page(struct page *page)
 
 static void free_compound_page(struct page *page)
 {
-	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
+	__free_pages_ok(page, compound_order(page));
 }
 
 static void prep_compound_page(struct page *page, unsigned long order)
@@ -234,12 +234,14 @@ static void prep_compound_page(struct page *page, unsigned long order)
 	int nr_pages = 1 << order;
 
 	set_compound_page_dtor(page, free_compound_page);
-	page[1].lru.prev = (void *)order;
-	for (i = 0; i < nr_pages; i++) {
+	set_compound_order(page, order);
+	__SetPageCompound(page);
+	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
 
+		__SetPageTail(p);
 		__SetPageCompound(p);
-		set_page_private(p, (unsigned long)page);
+		p->first_page = page;
 	}
 }
 
@@ -248,15 +250,19 @@ static void destroy_compound_page(struct page *page, unsigned long order)
 	int i;
 	int nr_pages = 1 << order;
 
-	if (unlikely((unsigned long)page[1].lru.prev != order))
+	if (unlikely(compound_order(page) != order))
 		bad_page(page);
 
-	for (i = 0; i < nr_pages; i++) {
+	if (unlikely(!PageCompound(page)))
+		bad_page(page);
+	__ClearPageCompound(page);
+	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
 
-		if (unlikely(!PageCompound(p) |
-				(page_private(p) != (unsigned long)page)))
+		if (unlikely(!PageCompound(p) | !PageTail(p) |
+				(p->first_page != page)))
 			bad_page(page);
+		__ClearPageTail(p);
 		__ClearPageCompound(p);
 	}
 }
@@ -429,13 +435,18 @@ static inline int free_pages_check(struct page *page)
 			1 << PG_private |
 			1 << PG_locked |
 			1 << PG_active |
-			1 << PG_reclaim |
 			1 << PG_slab |
 			1 << PG_swapcache |
 			1 << PG_writeback |
 			1 << PG_reserved |
 			1 << PG_buddy ))))
 		bad_page(page);
+	/*
+	 * PageReclaim == PageTail. It is only an error
+	 * for PageReclaim to be set if PageCompound is clear.
+	 */
+	if (unlikely(!PageCompound(page) && PageReclaim(page)))
+		bad_page(page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
 	/*
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -602,8 +602,7 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
 
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
-	if (unlikely(PageCompound(page)))
-		page = (struct page *)page_private(page);
+	page = compound_head(page);
 	BUG_ON(!PageSlab(page));
 	return (struct kmem_cache *)page->lru.next;
 }
@@ -615,8 +614,7 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
 
 static inline struct slab *page_get_slab(struct page *page)
 {
-	if (unlikely(PageCompound(page)))
-		page = (struct page *)page_private(page);
+	page = compound_head(page);
 	BUG_ON(!PageSlab(page));
 	return (struct slab *)page->lru.prev;
 }
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1325,9 +1325,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_page(x);
 
-	if (unlikely(PageCompound(page)))
-		page = page->first_page;
-
+	page = compound_head(page);
 	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
 		set_tracking(s, x, TRACK_FREE);
 
@@ -1338,10 +1336,7 @@ EXPORT_SYMBOL(kmem_cache_free);
 /* Figure out on which slab object the object resides */
 static struct page *get_object_page(const void *x)
 {
-	struct page *page = virt_to_page(x);
-
-	if (unlikely(PageCompound(page)))
-		page = page->first_page;
+	struct page *page = compound_head(virt_to_page(x));
 
 	if (!PageSlab(page))
 		return NULL;
@@ -2081,10 +2076,7 @@ void kfree(const void *x)
 	if (!x)
 		return;
 
-	page = virt_to_page(x);
-
-	if (unlikely(PageCompound(page)))
-		page = page->first_page;
+	page = compound_head(virt_to_page(x));
 
 	s = page->slab;
 
@@ -2120,10 +2112,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 		return NULL;
 	}
 
-	page = virt_to_page(p);
-
-	if (unlikely(PageCompound(page)))
-		page = page->first_page;
+	page = compound_head(virt_to_page(p));
 
 	new_cache = get_slab(new_size, flags);
 
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -55,7 +55,7 @@ static void fastcall __page_cache_release(struct page *page)
 
 static void put_compound_page(struct page *page)
 {
-	page = (struct page *)page_private(page);
+	page = compound_head(page);
 	if (put_page_testzero(page)) {
 		compound_page_dtor *dtor;
 
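The hunks above lean on three helpers that are not part of this diff: compound_head(), compound_order() and set_compound_order(). A minimal sketch of how they could be defined on top of the PageTail/first_page scheme introduced here (illustration only; the actual definitions belong in the mm headers and may differ):

/*
 * Sketch only: assumes tail pages carry a first_page back-pointer, as set
 * up in prep_compound_page() above, and that the head page keeps the
 * allocation order stashed in page[1].lru.prev, as the removed open-coded
 * assignment did.
 */
static inline struct page *compound_head(struct page *page)
{
        /* Tail pages point back at the head; any other page maps to itself. */
        if (unlikely(PageTail(page)))
                return page->first_page;
        return page;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
        /* Reuse an otherwise unused field of the first tail page. */
        page[1].lru.prev = (void *)order;
}

static inline unsigned long compound_order(struct page *page)
{
        if (!PageCompound(page))
                return 0;       /* order-0 page */
        return (unsigned long)page[1].lru.prev;
}

With helpers along these lines, callers such as kmem_cache_free(), kfree() and put_compound_page() above no longer open-code the PageCompound()/first_page lookup; compound_head() simply returns the page itself for ordinary order-0 pages.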