author     H. Peter Anvin <hpa@zytor.com>   2008-09-04 08:08:42 -0700
committer  H. Peter Anvin <hpa@zytor.com>   2008-09-04 08:08:42 -0700
commit     7203781c98ad9147564d327de6f6513ad8fc0f4e (patch)
tree       5c29a2a04a626bf08a0d56fd8a0068b3c92ad284 /arch/x86/mm
parent     671eef85a3e885dff4ce210d8774ad50a91d5967 (diff)
parent     af2e1f276ff08f17192411ea3b71c13a758dfe12 (diff)
Merge branch 'x86/cpu' into x86/core
Conflicts:
	arch/x86/kernel/cpu/feature_names.c
	include/asm-x86/cpufeature.h
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/init_64.c        48
-rw-r--r--  arch/x86/mm/ioremap.c        10
-rw-r--r--  arch/x86/mm/mmio-mod.c        4
-rw-r--r--  arch/x86/mm/pageattr-test.c   3
-rw-r--r--  arch/x86/mm/pageattr.c       27
-rw-r--r--  arch/x86/mm/pat.c            50
-rw-r--r--  arch/x86/mm/srat_32.c        12
7 files changed, 98 insertions(+), 56 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 129618c..d3746ef 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -60,7 +60,7 @@ static unsigned long dma_reserve __initdata;
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-int direct_gbpages __meminitdata
+int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
= 1
#endif
@@ -88,7 +88,11 @@ early_param("gbpages", parse_direct_gbpages_on);
int after_bootmem;
-static __init void *spp_getpage(void)
+/*
+ * NOTE: This function is marked __ref because it calls __init function
+ * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
+ */
+static __ref void *spp_getpage(void)
{
void *ptr;
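
The pattern behind the new __ref annotation: spp_getpage() may call alloc_bootmem_pages(), which lives in an __init section, but it only does so while after_bootmem is still 0, so the call can never happen after init memory is freed; __ref tells modpost not to flag the reference. A minimal userspace sketch of that allocator-selection pattern (after_bootmem here, mock_alloc_bootmem_pages() and mock_get_zeroed_page() are illustrative stand-ins, not the kernel symbols):

#include <stdio.h>
#include <stdlib.h>

static int after_bootmem;	/* 0 until the normal page allocator is up */

/* Stand-in for the boot-time-only allocator (__init in the kernel). */
static void *mock_alloc_bootmem_pages(size_t size)
{
	return calloc(1, size);
}

/* Stand-in for the regular allocator used once boot memory is gone. */
static void *mock_get_zeroed_page(void)
{
	return calloc(1, 4096);
}

/*
 * Sketch of the spp_getpage() shape: the "init-only" allocator is
 * reachable only while after_bootmem == 0, which is what makes the
 * __ref annotation safe in the real function.
 */
static void *sketch_getpage(void)
{
	if (after_bootmem)
		return mock_get_zeroed_page();
	return mock_alloc_bootmem_pages(4096);
}

int main(void)
{
	void *early = sketch_getpage();		/* boot-time path */
	after_bootmem = 1;
	void *late = sketch_getpage();		/* runtime path */
	printf("early=%p late=%p\n", early, late);
	free(early);
	free(late);
	return 0;
}
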
@@ -237,7 +241,7 @@ static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;
-static __meminit void *alloc_low_page(unsigned long *phys)
+static __ref void *alloc_low_page(unsigned long *phys)
{
unsigned long pfn = table_end++;
void *adr;
@@ -258,7 +262,7 @@ static __meminit void *alloc_low_page(unsigned long *phys)
return adr;
}
-static __meminit void unmap_low_page(void *adr)
+static __ref void unmap_low_page(void *adr)
{
if (after_bootmem)
return;
@@ -314,6 +318,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
{
unsigned long pages = 0;
unsigned long last_map_addr = end;
+ unsigned long start = address;
int i = pmd_index(address);
@@ -331,16 +336,24 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
}
if (pmd_val(*pmd)) {
- if (!pmd_large(*pmd))
+ if (!pmd_large(*pmd)) {
+ spin_lock(&init_mm.page_table_lock);
last_map_addr = phys_pte_update(pmd, address,
- end);
+ end);
+ spin_unlock(&init_mm.page_table_lock);
+ }
+ /* Count entries we're using from level2_ident_pgt */
+ if (start == 0)
+ pages++;
continue;
}
if (page_size_mask & (1<<PG_LEVEL_2M)) {
pages++;
+ spin_lock(&init_mm.page_table_lock);
set_pte((pte_t *)pmd,
pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+ spin_unlock(&init_mm.page_table_lock);
last_map_addr = (address & PMD_MASK) + PMD_SIZE;
continue;
}
@@ -349,7 +362,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
last_map_addr = phys_pte_init(pte, address, end);
unmap_low_page(pte);
+ spin_lock(&init_mm.page_table_lock);
pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+ spin_unlock(&init_mm.page_table_lock);
}
update_page_count(PG_LEVEL_2M, pages);
return last_map_addr;
@@ -362,9 +377,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
pmd_t *pmd = pmd_offset(pud, 0);
unsigned long last_map_addr;
- spin_lock(&init_mm.page_table_lock);
last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
- spin_unlock(&init_mm.page_table_lock);
__flush_tlb_all();
return last_map_addr;
}
@@ -400,20 +413,21 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
if (page_size_mask & (1<<PG_LEVEL_1G)) {
pages++;
+ spin_lock(&init_mm.page_table_lock);
set_pte((pte_t *)pud,
pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+ spin_unlock(&init_mm.page_table_lock);
last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
continue;
}
pmd = alloc_low_page(&pmd_phys);
-
- spin_lock(&init_mm.page_table_lock);
last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
unmap_low_page(pmd);
+
+ spin_lock(&init_mm.page_table_lock);
pud_populate(&init_mm, pud, __va(pmd_phys));
spin_unlock(&init_mm.page_table_lock);
-
}
__flush_tlb_all();
update_page_count(PG_LEVEL_1G, pages);
@@ -505,16 +519,14 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
continue;
}
- if (after_bootmem)
- pud = pud_offset(pgd, start & PGDIR_MASK);
- else
- pud = alloc_low_page(&pud_phys);
-
+ pud = alloc_low_page(&pud_phys);
last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
page_size_mask);
unmap_low_page(pud);
- pgd_populate(&init_mm, pgd_offset_k(start),
- __va(pud_phys));
+
+ spin_lock(&init_mm.page_table_lock);
+ pgd_populate(&init_mm, pgd, __va(pud_phys));
+ spin_unlock(&init_mm.page_table_lock);
}
return last_map_addr;
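
Taken together, the init_64.c hunks narrow the scope of init_mm.page_table_lock: instead of the callers (phys_pmd_update(), phys_pud_init(), kernel_physical_mapping_init()) holding it across whole table-building loops, the lock is now taken only around the individual updates (set_pte(), pmd_populate_kernel(), pud_populate(), pgd_populate()), so page allocation and range scanning run unlocked. A rough userspace sketch of that locking shape, with a pthread mutex standing in for init_mm.page_table_lock and write_pmd_entry() as an illustrative stand-in for the real table write:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Illustrative stand-in for set_pte()/pmd_populate_kernel(). */
static void write_pmd_entry(int i)
{
	printf("pmd[%d] populated\n", i);
}

/*
 * After this patch: the lock protects only each actual table write;
 * the allocation and bookkeeping between writes run without it.
 */
static void populate_range(int entries)
{
	for (int i = 0; i < entries; i++) {
		/* ... allocate the next-level table, compute addresses ... */
		pthread_mutex_lock(&page_table_lock);
		write_pmd_entry(i);
		pthread_mutex_unlock(&page_table_lock);
	}
}

int main(void)
{
	populate_range(4);
	return 0;
}
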
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index fba57be..cac6da5 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -170,7 +170,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
- retval = reserve_memtype(phys_addr, phys_addr + size,
+ retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
prot_val, &new_prot_val);
if (retval) {
pr_debug("Warning: reserve_memtype returned %d\n", retval);
@@ -553,13 +553,11 @@ static int __init check_early_ioremap_leak(void)
{
if (!early_ioremap_nested)
return 0;
-
- printk(KERN_WARNING
+ WARN(1, KERN_WARNING
"Debug warning: early ioremap leak of %d areas detected.\n",
- early_ioremap_nested);
+ early_ioremap_nested);
printk(KERN_WARNING
- "please boot with early_ioremap_debug and report the dmesg.\n");
- WARN_ON(1);
+ "please boot with early_ioremap_debug and report the dmesg.\n");
return 1;
}
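
The (u64) cast matters on 32-bit builds where resource_size_t can be only 32 bits wide: phys_addr + size can wrap for a mapping near the top of the 4 GB physical space, handing reserve_memtype() an end address below its start. A small standalone demonstration of the wrap and of widening one operand first (plain fixed-width types used for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t phys_addr = 0xfffff000u;	/* just below 4 GB */
	uint32_t size      = 0x2000u;		/* an 8 KB mapping */

	uint32_t wrapped_end = phys_addr + size;		/* 32-bit wrap */
	uint64_t correct_end = (uint64_t)phys_addr + size;	/* widened first */

	printf("wrapped end: 0x%08x\n", wrapped_end);		/* 0x00001000 */
	printf("correct end: 0x%llx\n",
	       (unsigned long long)correct_end);		/* 0x100001000 */
	return 0;
}
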
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index e7397e1..635b50e 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -430,7 +430,9 @@ static void enter_uniprocessor(void)
"may miss events.\n");
}
-static void leave_uniprocessor(void)
+/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
+ but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
+static void __ref leave_uniprocessor(void)
{
int cpu;
int err;
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 0dcd42e..d4aa503 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -221,8 +221,7 @@ static int pageattr_test(void)
failed += print_split(&sc);
if (failed) {
- printk(KERN_ERR "NOT PASSED. Please report.\n");
- WARN_ON(1);
+ WARN(1, KERN_ERR "NOT PASSED. Please report.\n");
return -EINVAL;
} else {
if (print)
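
This hunk and the check_early_ioremap_leak() hunk above replace a printk() followed by WARN_ON(1) with a single WARN(1, ...), so the message and the backtrace come out as one warning. WARN(condition, fmt, ...) prints the formatted message plus a stack dump when the condition is true and evaluates to the condition. A userspace mock of that shape (mock_warn() is illustrative, not the kernel macro):

#include <stdarg.h>
#include <stdio.h>

/*
 * Mock of WARN(): print the message (the real macro also dumps a
 * backtrace) and return the condition so it can gate an if ().
 */
static int mock_warn(int cond, const char *fmt, ...)
{
	va_list ap;

	if (!cond)
		return 0;
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fprintf(stderr, "---[ backtrace would follow here ]---\n");
	return 1;
}

int main(void)
{
	int failed = 3;

	/* One call replaces the old printk(KERN_ERR ...) + WARN_ON(1) pair. */
	if (failed)
		mock_warn(1, "NOT PASSED (%d failures). Please report.\n", failed);
	return 0;
}
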
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 65c6e46..43e2f84 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -55,13 +55,19 @@ static void split_page_count(int level)
int arch_report_meminfo(char *page)
{
- int n = sprintf(page, "DirectMap4k: %8lu\n"
- "DirectMap2M: %8lu\n",
- direct_pages_count[PG_LEVEL_4K],
- direct_pages_count[PG_LEVEL_2M]);
+ int n = sprintf(page, "DirectMap4k: %8lu kB\n",
+ direct_pages_count[PG_LEVEL_4K] << 2);
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ n += sprintf(page + n, "DirectMap2M: %8lu kB\n",
+ direct_pages_count[PG_LEVEL_2M] << 11);
+#else
+ n += sprintf(page + n, "DirectMap4M: %8lu kB\n",
+ direct_pages_count[PG_LEVEL_2M] << 12);
+#endif
#ifdef CONFIG_X86_64
- n += sprintf(page + n, "DirectMap1G: %8lu\n",
- direct_pages_count[PG_LEVEL_1G]);
+ if (direct_gbpages)
+ n += sprintf(page + n, "DirectMap1G: %8lu kB\n",
+ direct_pages_count[PG_LEVEL_1G] << 20);
#endif
return n;
}
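
The shift counts convert page counts into kilobytes: a 4 KB page is 4 kB (<< 2), a 2 MB page is 2048 kB (<< 11), a 4 MB page is 4096 kB (<< 12), and a 1 GB page is 1048576 kB (<< 20). Worked through with made-up page counts:

#include <stdio.h>

int main(void)
{
	unsigned long pages_4k = 1000, pages_2m = 50, pages_1g = 2;

	/* Same arithmetic as the new arch_report_meminfo(): pages -> kB. */
	printf("DirectMap4k: %8lu kB\n", pages_4k << 2);	/*    4000 kB */
	printf("DirectMap2M: %8lu kB\n", pages_2m << 11);	/*  102400 kB */
	printf("DirectMap1G: %8lu kB\n", pages_1g << 20);	/* 2097152 kB */
	return 0;
}
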
@@ -592,10 +598,9 @@ repeat:
if (!pte_val(old_pte)) {
if (!primary)
return 0;
- printk(KERN_WARNING "CPA: called for zero pte. "
+ WARN(1, KERN_WARNING "CPA: called for zero pte. "
"vaddr = %lx cpa->vaddr = %lx\n", address,
cpa->vaddr);
- WARN_ON(1);
return -EINVAL;
}
@@ -844,7 +849,7 @@ int set_memory_uc(unsigned long addr, int numpages)
/*
* for now UC MINUS. see comments in ioremap_nocache()
*/
- if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+ if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
_PAGE_CACHE_UC_MINUS, NULL))
return -EINVAL;
@@ -863,7 +868,7 @@ int set_memory_wc(unsigned long addr, int numpages)
if (!pat_enabled)
return set_memory_uc(addr, numpages);
- if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+ if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
_PAGE_CACHE_WC, NULL))
return -EINVAL;
@@ -879,7 +884,7 @@ int _set_memory_wb(unsigned long addr, int numpages)
int set_memory_wb(unsigned long addr, int numpages)
{
- free_memtype(addr, addr + numpages * PAGE_SIZE);
+ free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
return _set_memory_wb(addr, numpages);
}
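
The set_memory_uc()/set_memory_wc()/set_memory_wb() fixes reflect that reserve_memtype() and free_memtype() track physical address ranges, while these helpers are handed kernel virtual addresses; __pa() performs the translation. For direct-mapped kernel addresses that translation is essentially a constant offset, sketched below (MOCK_PAGE_OFFSET and mock_pa() are illustrative, not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

#define MOCK_PAGE_OFFSET 0xffff880000000000ULL	/* illustrative direct-map base */

/* Simplified stand-in for __pa() on a direct-mapped address. */
static uint64_t mock_pa(uint64_t vaddr)
{
	return vaddr - MOCK_PAGE_OFFSET;
}

int main(void)
{
	uint64_t vaddr = MOCK_PAGE_OFFSET + 0x100000;	/* alias of physical 1 MB */

	printf("virt 0x%llx -> phys 0x%llx\n",
	       (unsigned long long)vaddr,
	       (unsigned long long)mock_pa(vaddr));
	return 0;
}
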
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2fe3091..2a50e0f 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -207,6 +207,9 @@ static int chk_conflict(struct memtype *new, struct memtype *entry,
return -EBUSY;
}
+static struct memtype *cached_entry;
+static u64 cached_start;
+
/*
* req_type typically has one of the:
* - _PAGE_CACHE_WB
@@ -280,11 +283,17 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
spin_lock(&memtype_lock);
+ if (cached_entry && start >= cached_start)
+ entry = cached_entry;
+ else
+ entry = list_entry(&memtype_list, struct memtype, nd);
+
/* Search for existing mapping that overlaps the current range */
where = NULL;
- list_for_each_entry(entry, &memtype_list, nd) {
+ list_for_each_entry_continue(entry, &memtype_list, nd) {
if (end <= entry->start) {
where = entry->nd.prev;
+ cached_entry = list_entry(where, struct memtype, nd);
break;
} else if (start <= entry->start) { /* end > entry->start */
err = chk_conflict(new, entry, new_type);
@@ -292,6 +301,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
dprintk("Overlap at 0x%Lx-0x%Lx\n",
entry->start, entry->end);
where = entry->nd.prev;
+ cached_entry = list_entry(where,
+ struct memtype, nd);
}
break;
} else if (start < entry->end) { /* start > entry->start */
@@ -299,7 +310,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
if (!err) {
dprintk("Overlap at 0x%Lx-0x%Lx\n",
entry->start, entry->end);
- where = &entry->nd;
+ cached_entry = list_entry(entry->nd.prev,
+ struct memtype, nd);
+
+ /*
+ * Move to right position in the linked
+ * list to add this new entry
+ */
+ list_for_each_entry_continue(entry,
+ &memtype_list, nd) {
+ if (start <= entry->start) {
+ where = entry->nd.prev;
+ break;
+ }
+ }
}
break;
}
@@ -314,6 +338,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
return err;
}
+ cached_start = start;
+
if (where)
list_add(&new->nd, where);
else
@@ -343,6 +369,9 @@ int free_memtype(u64 start, u64 end)
spin_lock(&memtype_lock);
list_for_each_entry(entry, &memtype_list, nd) {
if (entry->start == start && entry->end == end) {
+ if (cached_entry == entry || cached_start == start)
+ cached_entry = NULL;
+
list_del(&entry->nd);
kfree(entry);
err = 0;
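
The new cached_entry/cached_start pair remembers where the previous reservation was inserted in the sorted memtype list, so a run of reservations with non-decreasing start addresses can resume the scan from that point instead of the list head; free_memtype() drops the cache whenever the cached entry (or one starting at the cached address) is removed. A generic sketch of that single-slot cache on a sorted singly linked list (struct range, insert_sorted() and the field names are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

struct range {
	unsigned long start;
	struct range *next;
};

static struct range head = { 0, NULL };	/* list kept sorted by start */
static struct range *cached;		/* last insertion point, may be NULL */
static unsigned long cached_start;

/* Insert keeping the order; resume from the cache when possible. */
static void insert_sorted(unsigned long start)
{
	struct range *prev = (cached && start >= cached_start) ? cached : &head;
	struct range *n = malloc(sizeof(*n));

	while (prev->next && prev->next->start < start)
		prev = prev->next;

	n->start = start;
	n->next = prev->next;
	prev->next = n;

	cached = n;			/* remember for the next insert */
	cached_start = start;
}

int main(void)
{
	/* Ascending inserts hit the cache and skip the scan from the head. */
	insert_sorted(0x1000);
	insert_sorted(0x2000);
	insert_sorted(0x3000);

	for (struct range *r = head.next; r; r = r->next)
		printf("start 0x%lx\n", r->start);
	return 0;
}
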
@@ -361,14 +390,6 @@ int free_memtype(u64 start, u64 end)
}
-/*
- * /dev/mem mmap interface. The memtype used for mapping varies:
- * - Use UC for mappings with O_SYNC flag
- * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
- * inherit the memtype from existing mapping.
- * - Else use UC_MINUS memtype (for backward compatibility with existing
- * X drivers.
- */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -406,14 +427,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t *vma_prot)
{
u64 offset = ((u64) pfn) << PAGE_SHIFT;
- unsigned long flags = _PAGE_CACHE_UC_MINUS;
+ unsigned long flags = -1;
int retval;
if (!range_is_allowed(pfn, size))
return 0;
if (file->f_flags & O_SYNC) {
- flags = _PAGE_CACHE_UC;
+ flags = _PAGE_CACHE_UC_MINUS;
}
#ifdef CONFIG_X86_32
@@ -436,13 +457,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
#endif
/*
- * With O_SYNC, we can only take UC mapping. Fail if we cannot.
+ * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
+ *
* Without O_SYNC, we want to get
* - WB for WB-able memory and no other conflicting mappings
* - UC_MINUS for non-WB-able memory with no other conflicting mappings
* - Inherit from confliting mappings otherwise
*/
- if (flags != _PAGE_CACHE_UC_MINUS) {
+ if (flags != -1) {
retval = reserve_memtype(offset, offset + size, flags, NULL);
} else {
retval = reserve_memtype(offset, offset + size, -1, &flags);
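
With this hunk, O_SYNC mappings now ask for UC_MINUS rather than UC, and the non-O_SYNC default changes from a hard-coded UC_MINUS to -1, the reserve_memtype() convention for "no explicit request": PAT then returns the type already established for the range, or WB if it is free, which is what the removed /dev/mem comment described. A small sketch of the sentinel logic (the constants and pick_request() are illustrative stand-ins):

#include <stdio.h>

#define REQ_INHERIT	(~0UL)	/* stand-in for passing -1 as req_type */
#define UC_MINUS	1UL	/* stand-in for _PAGE_CACHE_UC_MINUS */

/* Decide what to hand to the memtype reservation: a fixed request only
 * when O_SYNC asked for one, otherwise "inherit whatever is there". */
static unsigned long pick_request(int o_sync)
{
	return o_sync ? UC_MINUS : REQ_INHERIT;
}

int main(void)
{
	printf("O_SYNC  -> %s\n", pick_request(1) == UC_MINUS ? "UC_MINUS" : "inherit");
	printf("default -> %s\n", pick_request(0) == REQ_INHERIT ? "inherit" : "fixed");
	return 0;
}
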
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 1eb2973..16ae70f 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -178,7 +178,7 @@ void acpi_numa_arch_fixup(void)
* start of the node, and that the current "end" address is after
* the previous one.
*/
-static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk)
+static __init int node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk)
{
/*
* Only add present memory as told by the e820.
@@ -189,10 +189,10 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c
if (memory_chunk->start_pfn >= max_pfn) {
printk(KERN_INFO "Ignoring SRAT pfns: %08lx - %08lx\n",
memory_chunk->start_pfn, memory_chunk->end_pfn);
- return;
+ return -1;
}
if (memory_chunk->nid != nid)
- return;
+ return -1;
if (!node_has_online_mem(nid))
node_start_pfn[nid] = memory_chunk->start_pfn;
@@ -202,6 +202,8 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c
if (node_end_pfn[nid] < memory_chunk->end_pfn)
node_end_pfn[nid] = memory_chunk->end_pfn;
+
+ return 0;
}
int __init get_memcfg_from_srat(void)
@@ -259,7 +261,9 @@ int __init get_memcfg_from_srat(void)
printk(KERN_DEBUG
"chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
- node_read_chunk(chunk->nid, chunk);
+ if (node_read_chunk(chunk->nid, chunk))
+ continue;
+
e820_register_active_regions(chunk->nid, chunk->start_pfn,
min(chunk->end_pfn, max_pfn));
}
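
node_read_chunk() now reports whether a chunk was actually accounted to the node, so get_memcfg_from_srat() can skip e820_register_active_regions() for chunks it rejected (start pfn beyond max_pfn, or a chunk belonging to a different node). A condensed sketch of that caller contract (struct chunk, MOCK_MAX_PFN and read_chunk() are illustrative names):

#include <stdio.h>

struct chunk {
	int nid;
	unsigned long start_pfn, end_pfn;
};

#define MOCK_MAX_PFN 0x100000UL		/* illustrative top of usable memory */

/* Non-zero return means "ignore this chunk", mirroring the new contract. */
static int read_chunk(int nid, const struct chunk *c)
{
	if (c->start_pfn >= MOCK_MAX_PFN)
		return -1;		/* beyond usable memory */
	if (c->nid != nid)
		return -1;		/* belongs to another node */
	return 0;
}

int main(void)
{
	struct chunk chunks[] = {
		{ 0, 0x00000, 0x10000 },	/* accepted */
		{ 1, 0x10000, 0x20000 },	/* wrong nid for node 0 */
		{ 0, 0x200000, 0x210000 },	/* above MOCK_MAX_PFN */
	};

	for (int i = 0; i < 3; i++) {
		if (read_chunk(0, &chunks[i]))
			continue;	/* rejected: do not register its regions */
		printf("registering chunk %d\n", i);
	}
	return 0;
}
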