Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8e36353..8c960b4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -137,9 +137,9 @@ static inline int bad_range(struct zone *zone, struct page *page)
 static void bad_page(struct page *page)
 {
 	printk(KERN_EMERG "Bad page state in process '%s'\n"
-		"page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
-		"Trying to fix it up, but a reboot is needed\n"
-		"Backtrace:\n",
+		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
+		KERN_EMERG "Backtrace:\n",
 		current->comm, page, (int)(2*sizeof(unsigned long)),
 		(unsigned long)page->flags, page->mapping,
 		page_mapcount(page), page_count(page));
@@ -931,7 +931,8 @@ restart:
 	 *
 	 * The caller may dip into page reserves a bit more if the caller
 	 * cannot run direct reclaim, or if the caller has realtime scheduling
-	 * policy.
+	 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
+	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
 	 */
 	alloc_flags = ALLOC_WMARK_MIN;
 	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
@@ -1741,7 +1742,7 @@ void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
 
-	for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) {
+	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 		if (!early_pfn_valid(pfn))
 			continue;
 		page = pfn_to_page(pfn);
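The comment added in the second hunk documents how the allocator picks its watermark flags. As a minimal sketch of that behaviour (the first two statements below are the hunk's own context lines; the ALLOC_HARDER and ALLOC_HIGH assignments are reconstructed from the flag names the comment mentions and are illustrative, not quoted from this commit):

	/*
	 * Illustrative sketch, not verbatim kernel code: in this era
	 * GFP_ATOMIC is defined as __GFP_HIGH with no __GFP_WAIT, so an
	 * atomic allocation takes both branches below and may dip further
	 * into the reserves than an ordinary request.
	 */
	const int wait = gfp_mask & __GFP_WAIT;

	alloc_flags = ALLOC_WMARK_MIN;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;	/* RT task, or caller cannot run direct reclaim */
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;	/* __GFP_HIGH: allowed deeper into the reserves */

The other two hunks are straightforward fixes: each line of the multi-line bad_page() message gets its own KERN_EMERG prefix so every line is printed at emergency log level, and memmap_init_zone() stops advancing the page pointer in the loop header, since early_pfn_valid() may skip holes and the struct page must instead be looked up with pfn_to_page() for each valid pfn.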