Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--  arch/ia64/mm/Makefile       |  2
-rw-r--r--  arch/ia64/mm/contig.c       | 12
-rw-r--r--  arch/ia64/mm/discontig.c    | 44
-rw-r--r--  arch/ia64/mm/hugetlbpage.c  | 12
-rw-r--r--  arch/ia64/mm/init.c         | 32
-rw-r--r--  arch/ia64/mm/ioremap.c      | 43
-rw-r--r--  arch/ia64/mm/tlb.c          | 12
7 files changed, 92 insertions(+), 65 deletions(-)
diff --git a/arch/ia64/mm/Makefile b/arch/ia64/mm/Makefile
index d78d20f..bb0a01a 100644
--- a/arch/ia64/mm/Makefile
+++ b/arch/ia64/mm/Makefile
@@ -2,7 +2,7 @@
# Makefile for the ia64-specific parts of the memory manager.
#
-obj-y := init.o fault.o tlb.o extable.o
+obj-y := init.o fault.o tlb.o extable.o ioremap.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NUMA) += numa.o
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index acaaec4..84fd1c1 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -97,7 +97,7 @@ find_max_pfn (unsigned long start, unsigned long end, void *arg)
* Find a place to put the bootmap and return its starting address in
* bootmap_start. This address must be page-aligned.
*/
-int
+static int __init
find_bootmap_location (unsigned long start, unsigned long end, void *arg)
{
unsigned long needed = *(unsigned long *)arg;
@@ -141,7 +141,7 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg)
* Walk the EFI memory map and find usable memory for the system, taking
* into account reserved areas.
*/
-void
+void __init
find_memory (void)
{
unsigned long bootmap_size;
@@ -176,18 +176,20 @@ find_memory (void)
*
* Allocate and setup per-cpu data areas.
*/
-void *
+void * __cpuinit
per_cpu_init (void)
{
void *cpu_data;
int cpu;
+ static int first_time=1;
/*
* get_free_pages() cannot be used before cpu_init() done. BSP
* allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
* get_zeroed_page().
*/
- if (smp_processor_id() == 0) {
+ if (first_time) {
+ first_time=0;
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
for (cpu = 0; cpu < NR_CPUS; cpu++) {
@@ -226,7 +228,7 @@ count_dma_pages (u64 start, u64 end, void *arg)
* Set up the page tables.
*/
-void
+void __init
paging_init (void)
{
unsigned long max_dma;
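
[Editor's note on the per_cpu_init() hunk above: the old `smp_processor_id() == 0`
test would re-run the bootmem allocation any time the boot CPU called
per_cpu_init() again, whereas the static first_time flag guarantees it happens
exactly once. Below is a minimal userspace sketch of that once-only pattern;
NR_CPUS, PERCPU_AREA_SIZE and the calloc() stand-in for __alloc_bootmem() are
illustrative assumptions, not kernel code.]

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4
#define PERCPU_AREA_SIZE 4096

static void *cpu_data_base;

static void *per_cpu_area_init(int cpu)
{
	static int first_time = 1;	/* set once, at the first call */

	if (first_time) {
		first_time = 0;
		/* stand-in for __alloc_bootmem(): one area per possible CPU */
		cpu_data_base = calloc(NR_CPUS, PERCPU_AREA_SIZE);
	}
	return (char *)cpu_data_base + cpu * PERCPU_AREA_SIZE;
}

int main(void)
{
	/* calling twice for CPU 0 must not allocate a second time */
	void *a = per_cpu_area_init(0);
	void *b = per_cpu_area_init(0);
	printf("same base: %s\n", a == b ? "yes" : "no");
	return 0;
}
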
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index c87d6d1..ec9eeb8 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -379,31 +379,6 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
}
/**
- * pgdat_insert - insert the pgdat into global pgdat_list
- * @pgdat: the pgdat for a node.
- */
-static void __init pgdat_insert(pg_data_t *pgdat)
-{
- pg_data_t *prev = NULL, *next;
-
- for_each_pgdat(next)
- if (pgdat->node_id < next->node_id)
- break;
- else
- prev = next;
-
- if (prev) {
- prev->pgdat_next = pgdat;
- pgdat->pgdat_next = next;
- } else {
- pgdat->pgdat_next = pgdat_list;
- pgdat_list = pgdat;
- }
-
- return;
-}
-
-/**
* memory_less_nodes - allocate and initialize CPU only nodes pernode
* information.
*/
@@ -525,15 +500,20 @@ void __init find_memory(void)
* find_pernode_space() does most of this already, we just need to set
* local_per_cpu_offset
*/
-void *per_cpu_init(void)
+void __cpuinit *per_cpu_init(void)
{
int cpu;
+ static int first_time = 1;
+
if (smp_processor_id() != 0)
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
- for (cpu = 0; cpu < NR_CPUS; cpu++)
- per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+ if (first_time) {
+ first_time = 0;
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+ }
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
@@ -555,7 +535,7 @@ void show_mem(void)
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
unsigned long present;
unsigned long flags;
int shared = 0, cached = 0, reserved = 0;
@@ -740,11 +720,5 @@ void __init paging_init(void)
pfn_offset, zholes_size);
}
- /*
- * Make memory less nodes become a member of the known nodes.
- */
- for_each_node_mask(node, memory_less_mask)
- pgdat_insert(mem_data[node].pgdat);
-
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
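
[Editor's note: the discontig.c hunks delete pgdat_insert() and switch
show_mem() to for_each_online_pgdat(). One way to picture why the hand-sorted
pgdat_list becomes unnecessary: the core iterator walks a bitmap of online node
IDs and indexes node data directly, so there is no per-arch linked list to
maintain. The following self-contained model uses simplified assumptions
(MAX_NUMNODES, the bitmap, the macro), not the kernel's definitions.]

#include <stdio.h>

#define MAX_NUMNODES 8

struct pg_data { unsigned long present_pages; };

static struct pg_data node_data[MAX_NUMNODES];
static unsigned long node_online_map;	/* bit n set => node n is online */

/* visit only nodes whose bit is set; no list insertion needed anywhere */
#define for_each_online_node(n) \
	for ((n) = 0; (n) < MAX_NUMNODES; (n)++) \
		if (node_online_map & (1UL << (n)))

int main(void)
{
	int nid;

	node_online_map = (1UL << 0) | (1UL << 2);	/* nodes 0 and 2 online */
	node_data[0].present_pages = 1000;
	node_data[2].present_pages = 500;

	for_each_online_node(nid)
		printf("node %d: %lu pages\n", nid, node_data[nid].present_pages);
	return 0;
}
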
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 2d13889..8d50671 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -68,9 +68,10 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
/*
- * This function checks for proper alignment of input addr and len parameters.
+ * Don't actually need to do any preparation, but need to make sure
+ * the address is in the right region.
*/
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
@@ -112,8 +113,7 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
unsigned long floor, unsigned long ceiling)
{
/*
- * This is called only when is_hugepage_only_range(addr,),
- * and it follows that is_hugepage_only_range(end,) also.
+ * This is called to free hugetlb page tables.
*
* The offset of these addresses from the base of the hugetlb
* region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
@@ -125,9 +125,9 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
addr = htlbpage_to_page(addr);
end = htlbpage_to_page(end);
- if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE))
+ if (REGION_NUMBER(floor) == RGN_HPAGE)
floor = htlbpage_to_page(floor);
- if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE))
+ if (REGION_NUMBER(ceiling) == RGN_HPAGE)
ceiling = htlbpage_to_page(ceiling);
free_pgd_range(tlb, addr, end, floor, ceiling);
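
[Editor's note on the rename above: is_aligned_hugepage_range() only validated
alignment, while prepare_hugepage_range() on ia64 must also confirm the address
sits in the dedicated huge-page region, as the new comment says. A standalone
sketch of those checks follows; HPAGE_SHIFT, RGN_SHIFT and RGN_HPAGE are
assumed illustrative values, and the region check is inferred from the comment
and the RGN_HPAGE tests visible in the hunk, not copied from the commit.]

#include <stdio.h>

#define HPAGE_SHIFT	28			/* assumed 256 MB huge pages */
#define HPAGE_MASK	(~((1UL << HPAGE_SHIFT) - 1))
#define RGN_SHIFT	61			/* region number lives in the top bits */
#define RGN_HPAGE	4UL			/* assumed huge-page region number */
#define REGION_NUMBER(a) ((unsigned long)(a) >> RGN_SHIFT)
#define EINVAL		22

static int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)			/* length must be a huge-page multiple */
		return -EINVAL;
	if (addr & ~HPAGE_MASK)			/* address must be huge-page aligned */
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)	/* and must live in the hugetlb region */
		return -EINVAL;
	return 0;
}

int main(void)
{
	unsigned long good = RGN_HPAGE << RGN_SHIFT;

	printf("aligned, in-region: %d\n", prepare_hugepage_range(good, 1UL << HPAGE_SHIFT));
	printf("misaligned:         %d\n", prepare_hugepage_range(good + 1, 1UL << HPAGE_SHIFT));
	return 0;
}
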
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index b38b6d2..cafa877 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -109,6 +109,7 @@ lazy_mmu_prot_update (pte_t pte)
{
unsigned long addr;
struct page *page;
+ unsigned long order;
if (!pte_exec(pte))
return; /* not an executable page... */
@@ -119,7 +120,12 @@ lazy_mmu_prot_update (pte_t pte)
if (test_bit(PG_arch_1, &page->flags))
return; /* i-cache is already coherent with d-cache */
- flush_icache_range(addr, addr + PAGE_SIZE);
+ if (PageCompound(page)) {
+ order = (unsigned long) (page[1].lru.prev);
+ flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
+ }
+ else
+ flush_icache_range(addr, addr + PAGE_SIZE);
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
@@ -197,7 +203,7 @@ free_initmem (void)
eaddr = (unsigned long) ia64_imva(__init_end);
while (addr < eaddr) {
ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
+ init_page_count(virt_to_page(addr));
free_page(addr);
++totalram_pages;
addr += PAGE_SIZE;
@@ -206,7 +212,7 @@ free_initmem (void)
(__init_end - __init_begin) >> 10);
}
-void
+void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
struct page *page;
@@ -252,7 +258,7 @@ free_initrd_mem (unsigned long start, unsigned long end)
continue;
page = virt_to_page(start);
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
free_page(start);
++totalram_pages;
}
@@ -261,7 +267,7 @@ free_initrd_mem (unsigned long start, unsigned long end)
/*
* This installs a clean page in the kernel's page table.
*/
-struct page *
+static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
pgd_t *pgd;
@@ -294,7 +300,7 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
return page;
}
-static void
+static void __init
setup_gate (void)
{
struct page *page;
@@ -411,7 +417,7 @@ ia64_mmu_init (void *my_cpu_data)
#ifdef CONFIG_VIRTUAL_MEM_MAP
-int
+int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
unsigned long address, start_page, end_page;
@@ -519,7 +525,7 @@ ia64_pfn_valid (unsigned long pfn)
}
EXPORT_SYMBOL(ia64_pfn_valid);
-int
+int __init
find_largest_hole (u64 start, u64 end, void *arg)
{
u64 *max_gap = arg;
@@ -535,7 +541,7 @@ find_largest_hole (u64 start, u64 end, void *arg)
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */
-static int
+static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
{
unsigned long num_reserved = 0;
@@ -556,7 +562,7 @@ count_reserved_pages (u64 start, u64 end, void *arg)
* purposes.
*/
-static int nolwsys;
+static int nolwsys __initdata;
static int __init
nolwsys_setup (char *s)
@@ -567,7 +573,7 @@ nolwsys_setup (char *s)
__setup("nolwsys", nolwsys_setup);
-void
+void __init
mem_init (void)
{
long reserved_pages, codesize, datasize, initsize;
@@ -600,7 +606,7 @@ mem_init (void)
kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
kclist_add(&kcore_kernel, _stext, _end - _stext);
- for_each_pgdat(pgdat)
+ for_each_online_pgdat(pgdat)
if (pgdat->bdata->node_bootmem_map)
totalram_pages += free_all_bootmem_node(pgdat);
@@ -640,7 +646,7 @@ mem_init (void)
void online_page(struct page *page)
{
ClearPageReserved(page);
- set_page_count(page, 1);
+ init_page_count(page);
__free_page(page);
totalram_pages++;
num_physpages++;
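
[Editor's note: the lazy_mmu_prot_update() hunk makes the i-cache flush cover a
whole compound (huge) page rather than a single base page; as the added code
shows, kernels of this generation stashed a compound page's allocation order in
page[1].lru.prev. A plain-C model of that size computation follows; the mock
struct page and the 16 KB PAGE_SHIFT are assumptions for illustration.]

#include <stdio.h>

#define PAGE_SHIFT 14			/* assumed 16 KB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct list_head { void *next, *prev; };
struct page {
	struct list_head lru;
	int compound;			/* stand-in for PageCompound() */
};

/* how many bytes to flush for the mapping that starts at this page */
static unsigned long flush_len(struct page *page)
{
	if (page->compound) {
		/* the order was stored in the second page's lru.prev */
		unsigned long order = (unsigned long)page[1].lru.prev;
		return 1UL << order << PAGE_SHIFT;
	}
	return PAGE_SIZE;
}

int main(void)
{
	struct page pages[2] = { { .compound = 1 } };

	pages[1].lru.prev = (void *)4;	/* an order-4 compound page */
	printf("flush %lu bytes\n", flush_len(pages));	/* 16 * PAGE_SIZE */
	return 0;
}
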
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
new file mode 100644
index 0000000..643ccc6
--- /dev/null
+++ b/arch/ia64/mm/ioremap.c
@@ -0,0 +1,43 @@
+/*
+ * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/efi.h>
+#include <asm/io.h>
+
+static inline void __iomem *
+__ioremap (unsigned long offset, unsigned long size)
+{
+ return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset);
+}
+
+void __iomem *
+ioremap (unsigned long offset, unsigned long size)
+{
+ if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB))
+ return phys_to_virt(offset);
+
+ if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
+ return __ioremap(offset, size);
+
+ /*
+ * Someday this should check ACPI resources so we
+ * can do the right thing for hot-plugged regions.
+ */
+ return __ioremap(offset, size);
+}
+EXPORT_SYMBOL(ioremap);
+
+void __iomem *
+ioremap_nocache (unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size);
+}
+EXPORT_SYMBOL(ioremap_nocache);
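
[Editor's note: the new ioremap.c prefers a cacheable identity mapping when EFI
reports the whole physical range as write-back capable, and falls back to the
uncached region otherwise. Below is a userspace model of that decision logic
with a mocked efi_mem_attribute_range() and made-up region constants; it is a
sketch of the flow, not a drop-in implementation.]

#include <stdio.h>

#define UNCACHED_OFFSET 0xc000000000000000UL	/* assumed uncached region base */

enum { EFI_MEMORY_WB = 1, EFI_MEMORY_UC = 2 };

/* mock of efi_mem_attribute_range(): pretend only low RAM is WB-capable */
static int efi_mem_attribute_range(unsigned long phys, unsigned long size,
				   unsigned long attr)
{
	if (attr == EFI_MEMORY_WB)
		return phys + size <= 0x80000000UL;
	return 1;	/* everything can be mapped uncached in this mock */
}

static void *phys_to_virt(unsigned long phys)
{
	return (void *)(0xe000000000000000UL | phys);	/* cached identity map */
}

static void *ioremap(unsigned long offset, unsigned long size)
{
	/* prefer a cacheable mapping when EFI allows it for the whole range */
	if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB))
		return phys_to_virt(offset);
	/* otherwise fall back to the uncached identity mapping */
	return (void *)(UNCACHED_OFFSET | offset);
}

int main(void)
{
	printf("RAM  -> %p\n", ioremap(0x1000, 0x1000));
	printf("MMIO -> %p\n", ioremap(0xf8000000UL, 0x1000));
	return 0;
}
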
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 6a4eec9..4dbbca0 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -156,17 +156,19 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
nbits = purge.max_bits;
start &= ~((1UL << nbits) - 1);
-# ifdef CONFIG_SMP
- platform_global_tlb_purge(mm, start, end, nbits);
-# else
preempt_disable();
+#ifdef CONFIG_SMP
+ if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
+ platform_global_tlb_purge(mm, start, end, nbits);
+ preempt_enable();
+ return;
+ }
+#endif
do {
ia64_ptcl(start, (nbits<<2));
start += (1UL << nbits);
} while (start < end);
preempt_enable();
-# endif
-
ia64_srlz_i(); /* srlz.i implies srlz.d */
}
EXPORT_SYMBOL(flush_tlb_range);
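
[Editor's note: the tlb.c hunk replaces the compile-time SMP/UP split with a
runtime check: if the mm being flushed is the current one and is live on
exactly this CPU, a local ptc.l loop suffices; otherwise the machine-wide purge
runs. A userspace model of that control flow follows; cpus_weight() is
approximated with a popcount and printf() stands in for the purge primitives.]

#include <stdio.h>

static int nr_cpus_in_mask(unsigned long mask)	/* ~ cpus_weight() */
{
	return __builtin_popcountl(mask);
}

static void local_purge(unsigned long start, unsigned long end, int nbits)
{
	/* stand-in for the ia64_ptcl() loop, same stride as the original */
	unsigned long stride = 1UL << nbits;
	for (; start < end; start += stride)
		printf("ptc.l %#lx\n", start);
}

static void flush_range(unsigned long cpu_vm_mask, int mm_is_current,
			unsigned long start, unsigned long end, int nbits)
{
	/* global purge unless the mm is current and on exactly one CPU */
	if (!mm_is_current || nr_cpus_in_mask(cpu_vm_mask) != 1) {
		printf("global purge %#lx-%#lx\n", start, end);
		return;
	}
	local_purge(start, end, nbits);
}

int main(void)
{
	flush_range(0x1, 1, 0x10000, 0x14000, 14);	/* local: one CPU only */
	flush_range(0x3, 1, 0x10000, 0x14000, 14);	/* global: two CPUs */
	return 0;
}
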