Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 105
1 file changed, 53 insertions, 52 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4f59d90..ebd425c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -39,6 +39,7 @@
 #include <linux/stop_machine.h>
 #include <linux/sort.h>
 #include <linux/pfn.h>
+#include <linux/backing-dev.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -495,17 +496,16 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        int i;
        int reserved = 0;
 
-       arch_free_page(page, order);
-       if (!PageHighMem(page))
-               debug_check_no_locks_freed(page_address(page),
-                                          PAGE_SIZE<<order);
-
        for (i = 0 ; i < (1 << order) ; ++i)
                reserved += free_pages_check(page + i);
        if (reserved)
                return;
 
+       if (!PageHighMem(page))
+               debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+       arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);
+
        local_irq_save(flags);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, order);
@@ -781,13 +781,14 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
        struct per_cpu_pages *pcp;
        unsigned long flags;
 
-       arch_free_page(page, 0);
-
        if (PageAnon(page))
                page->mapping = NULL;
        if (free_pages_check(page))
                return;
 
+       if (!PageHighMem(page))
+               debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
+       arch_free_page(page, 0);
        kernel_map_pages(page, 1, 0);
 
        pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
@@ -900,7 +901,8 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags)
 {
        /* free_pages my go negative - that's OK */
-       long min = mark, free_pages = z->free_pages - (1 << order) + 1;
+       unsigned long min = mark;
+       long free_pages = z->free_pages - (1 << order) + 1;
        int o;
 
        if (alloc_flags & ALLOC_HIGH)
@@ -1049,7 +1051,7 @@ nofail_alloc:
                        if (page)
                                goto got_pg;
                        if (gfp_mask & __GFP_NOFAIL) {
-                               blk_congestion_wait(WRITE, HZ/50);
+                               congestion_wait(WRITE, HZ/50);
                                goto nofail_alloc;
                        }
                }
@@ -1112,7 +1114,7 @@ rebalance:
                        do_retry = 1;
        }
        if (do_retry) {
-               blk_congestion_wait(WRITE, HZ/50);
+               congestion_wait(WRITE, HZ/50);
                goto rebalance;
        }
 
@@ -2050,8 +2052,8 @@ int __init early_pfn_to_nid(unsigned long pfn)
 
 /**
  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
- * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed
- * @max_low_pfn: The highest PFN that till be passed to free_bootmem_node
+ * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
+ * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
  *
  * If an architecture guarantees that all ranges registered with
  * add_active_ranges() contain no holes and may be freed, this
@@ -2081,11 +2083,11 @@ void __init free_bootmem_with_active_regions(int nid,
 
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
- * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used
+ * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
  *
  * If an architecture guarantees that all ranges registered with
  * add_active_ranges() contain no holes and may be freed, this
- * this function may be used instead of calling memory_present() manually.
+ * function may be used instead of calling memory_present() manually.
  */
 void __init sparse_memory_present_with_active_regions(int nid)
 {
@@ -2155,14 +2157,14 @@ static void __init account_node_boundary(unsigned int nid,
 
 /**
  * get_pfn_range_for_nid - Return the start and end page frames for a node
- * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned
- * @start_pfn: Passed by reference. On return, it will have the node start_pfn
- * @end_pfn: Passed by reference. On return, it will have the node end_pfn
+ * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
+ * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
+ * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
  *
  * It returns the start and end page frame of a node based on information
  * provided by an arch calling add_active_range(). If called for a node
  * with no available memory, a warning is printed and the start and end
- * PFNs will be 0
+ * PFNs will be 0.
  */
 void __init get_pfn_range_for_nid(unsigned int nid,
                        unsigned long *start_pfn, unsigned long *end_pfn)
@@ -2215,7 +2217,7 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
 
 /*
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
- * then all holes in the requested range will be accounted for
+ * then all holes in the requested range will be accounted for.
  */
 unsigned long __init __absent_pages_in_range(int nid,
                                unsigned long range_start_pfn,
@@ -2268,7 +2270,7 @@ unsigned long __init __absent_pages_in_range(int nid,
  * @start_pfn: The start PFN to start searching for holes
  * @end_pfn: The end PFN to stop searching for holes
  *
- * It returns the number of pages frames in memory holes within a range
+ * It returns the number of pages frames in memory holes within a range.
  */
 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
                                unsigned long end_pfn)
@@ -2293,19 +2295,6 @@ unsigned long __init zone_absent_pages_in_node(int nid,
        return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
 }
 
-/* Return the zone index a PFN is in */
-int memmap_zone_idx(struct page *lmem_map)
-{
-       int i;
-       unsigned long phys_addr = virt_to_phys(lmem_map);
-       unsigned long pfn = phys_addr >> PAGE_SHIFT;
-
-       for (i = 0; i < MAX_NR_ZONES; i++)
-               if (pfn < arch_zone_highest_possible_pfn[i])
-                       break;
-
-       return i;
-}
 #else
 static inline unsigned long zone_spanned_pages_in_node(int nid,
                                        unsigned long zone_type,
@@ -2324,10 +2313,6 @@ static inline unsigned long zone_absent_pages_in_node(int nid,
        return zholes_size[zone_type];
 }
 
-static inline int memmap_zone_idx(struct page *lmem_map)
-{
-       return MAX_NR_ZONES;
-}
 #endif
 
 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
@@ -2582,11 +2567,12 @@ void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
 
 /**
  * remove_all_active_ranges - Remove all currently registered regions
+ *
  * During discovery, it may be found that a table like SRAT is invalid
 * and an alternative discovery method must be used. This function removes
 * all currently registered regions.
 */
-void __init remove_all_active_ranges()
+void __init remove_all_active_ranges(void)
 {
        memset(early_node_map, 0, sizeof(early_node_map));
        nr_nodemap_entries = 0;
@@ -2636,7 +2622,7 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
- * add_active_range()
+ * add_active_range().
 */
 unsigned long __init find_min_pfn_with_active_regions(void)
 {
@@ -2647,7 +2633,7 @@ unsigned long __init find_min_pfn_with_active_regions(void)
 * find_max_pfn_with_active_regions - Find the maximum PFN registered
 *
 * It returns the maximum PFN based on information provided via
- * add_active_range()
+ * add_active_range().
 */
 unsigned long __init find_max_pfn_with_active_regions(void)
 {
@@ -2662,10 +2648,7 @@ unsigned long __init find_max_pfn_with_active_regions(void)
 
 /**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
- * @arch_max_dma_pfn: The maximum PFN usable for ZONE_DMA
- * @arch_max_dma32_pfn: The maximum PFN usable for ZONE_DMA32
- * @arch_max_low_pfn: The maximum PFN usable for ZONE_NORMAL
- * @arch_max_high_pfn: The maximum PFN usable for ZONE_HIGHMEM
+ * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by add_active_range(), the size of each
@@ -2723,14 +2706,15 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
 /**
- * set_dma_reserve - Account the specified number of pages reserved in ZONE_DMA
- * @new_dma_reserve - The number of pages to mark reserved
+ * set_dma_reserve - set the specified number of pages reserved in the first zone
+ * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by present_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
- * function may optionally be used to account for unfreeable pages in
- * ZONE_DMA. The effect will be lower watermarks and smaller per-cpu batchsize
+ * function may optionally be used to account for unfreeable pages in the
+ * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
+ * smaller per-cpu batchsize.
 */
 void __init set_dma_reserve(unsigned long new_dma_reserve)
 {
@@ -2843,10 +2827,11 @@ static void setup_per_zone_lowmem_reserve(void)
        calculate_totalreserve_pages();
 }
 
-/*
- * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures
- *     that the pages_{min,low,high} values for each zone are set correctly
- *     with respect to min_free_kbytes.
+/**
+ * setup_per_zone_pages_min - called when min_free_kbytes changes.
+ *
+ * Ensures that the pages_{min,low,high} values for each zone are set correctly
+ * with respect to min_free_kbytes.
 */
 void setup_per_zone_pages_min(void)
 {
@@ -3135,3 +3120,19 @@ unsigned long page_to_pfn(struct page *page)
 EXPORT_SYMBOL(pfn_to_page);
 EXPORT_SYMBOL(page_to_pfn);
 #endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
+
+#if MAX_NUMNODES > 1
+/*
+ * Find the highest possible node id.
+ */
+int highest_possible_node_id(void)
+{
+       unsigned int node;
+       unsigned int highest = 0;
+
+       for_each_node_mask(node, node_possible_map)
+               highest = node;
+       return highest;
+}
+EXPORT_SYMBOL(highest_possible_node_id);
+#endif
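
The first two hunks reorder the free paths so that free_pages_check() runs before arch_free_page(), debug_check_no_locks_freed() and kernel_map_pages(): a page that fails validation is now rejected before any hook has done anything irreversible to it. A minimal standalone C sketch of that validate-before-mutate ordering follows; every name in it is a hypothetical stand-in, not kernel API.

#include <stdbool.h>
#include <stdio.h>

struct buffer {
        bool corrupted;         /* stand-in for what free_pages_check() detects */
        bool mapped;
};

/* stand-in for free_pages_check(): true means "refuse to free" */
static bool buffer_check(const struct buffer *b)
{
        return b->corrupted;
}

/* stand-in for arch_free_page()/kernel_map_pages(): a side effect
 * that must not happen to a buffer we are about to reject */
static void unmap_buffer(struct buffer *b)
{
        b->mapped = false;
}

static void free_buffer(struct buffer *b)
{
        /* validate first, as the patched __free_pages_ok() and
         * free_hot_cold_page() now do */
        if (buffer_check(b)) {
                fprintf(stderr, "bad buffer, deliberately leaked\n");
                return;
        }
        unmap_buffer(b);
        /* ... hand the buffer back to the free lists ... */
}

int main(void)
{
        struct buffer good = { false, true }, bad = { true, true };

        free_buffer(&good);     /* unmapped, then freed */
        free_buffer(&bad);      /* rejected before any side effect */
        return 0;
}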
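The zone_watermark_ok() hunk splits one declaration into two: min keeps the unsigned type of the mark parameter, while free_pages stays a plain long because, as the comment notes, it may go negative after subtracting (1 << order). A standalone illustration (not kernel code) of the C conversion rule that makes mixing the two signednesses in a single expression hazardous:

#include <stdio.h>

int main(void)
{
        unsigned long mark = 128;               /* watermark, never negative */
        long free_pages = 10 - (1 << 8) + 1;    /* -245: a genuine deficit */

        /* Wrong: the usual arithmetic conversions turn the negative
         * long into a huge unsigned long, so the deficit looks like
         * an enormous surplus. */
        if ((unsigned long)free_pages > mark)
                printf("mixed comparison: watermark looks fine!\n");

        /* Right: keep the comparison in the signed domain. */
        if (free_pages <= (long)mark)
                printf("signed comparison: below watermark\n");
        return 0;
}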
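The two congestion hunks only rename the throttle: congestion_wait(), declared in the newly included linux/backing-dev.h, replaces blk_congestion_wait(). The retry discipline itself is unchanged: a __GFP_NOFAIL allocation or a reclaim retry sleeps until write congestion eases, for at most HZ/50 (about 20ms), then tries again rather than fail. A hedged userspace sketch of that loop; try_alloc() is a hypothetical stand-in for the failing attempt:

#include <stddef.h>
#include <time.h>

void *try_alloc(size_t size);   /* hypothetical: may return NULL */

void *alloc_nofail(size_t size)
{
        void *p;

        while ((p = try_alloc(size)) == NULL) {
                /* nap briefly under pressure, then retry, mirroring
                 * congestion_wait(WRITE, HZ/50) in the kernel loop */
                struct timespec nap = { 0, 20 * 1000 * 1000 };  /* ~20ms */
                nanosleep(&nap, NULL);
        }
        return p;       /* never NULL, like __GFP_NOFAIL */
}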
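The new highest_possible_node_id() works because for_each_node_mask() visits set bits in ascending order, so the last node assigned to highest is the largest possible node id. A userspace analogue over a plain bitmask; MAX_NODES and the example mask are illustrative only:

#include <stdio.h>

#define MAX_NODES 64

static unsigned int highest_possible_id(unsigned long long possible_map)
{
        unsigned int id;
        unsigned int highest = 0;

        /* ascending scan, like for_each_node_mask() */
        for (id = 0; id < MAX_NODES; id++)
                if (possible_map & (1ULL << id))
                        highest = id;
        return highest;
}

int main(void)
{
        /* bits 0, 2 and 5 set: "nodes" 0, 2 and 5 are possible */
        printf("%u\n", highest_possible_id(0x25ULL));   /* prints 5 */
        return 0;
}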