author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2007-04-12 15:30:22 +1000
committer Paul Mackerras <paulus@samba.org>                  2007-04-13 04:09:39 +1000
commit    88df6e90fa9782dbf44d936e44649afe271e4790 (patch)
tree      d0f4c46731e35e96a381dd3e3138f0276741ca57
parent    ee4f2ea48674b6c9d91bc854edc51a3e6a7168c4 (diff)
[POWERPC] DEBUG_PAGEALLOC for 32-bit
Here's an implementation of DEBUG_PAGEALLOC for ppc32. It disables BAT
mapping and has only been tested on hash-table-based processors, though
it shouldn't be too hard to adapt it to others.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--  arch/powerpc/Kconfig.debug        |  9
-rw-r--r--  arch/powerpc/mm/init_32.c         |  4
-rw-r--r--  arch/powerpc/mm/pgtable_32.c      | 52
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c      |  4
-rw-r--r--  include/asm-powerpc/cacheflush.h  |  6
5 files changed, 74 insertions(+), 1 deletion(-)
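For context (not part of the patch): with CONFIG_DEBUG_PAGEALLOC enabled, the
generic page allocator drives the per-architecture kernel_map_pages() hook that
this patch implements, unmapping pages as they are freed and mapping them again
as they are handed back out. A minimal sketch of that caller side, with
hypothetical helper names used only for illustration:

	/*
	 * Caller-side sketch, not part of this patch: the generic allocator
	 * is expected to invoke the arch hook roughly like this, so that
	 * freed pages drop out of the kernel linear mapping and any
	 * use-after-free access faults immediately.
	 */
	#include <linux/mm.h>
	#include <asm/cacheflush.h>

	/* hypothetical helpers, named here for illustration only */
	static void debug_pagealloc_on_free(struct page *page, int order)
	{
		/* unmap the freed pages from the linear mapping */
		kernel_map_pages(page, 1 << order, 0);
	}

	static void debug_pagealloc_on_alloc(struct page *page, int order)
	{
		/* map the pages back before handing them out again */
		kernel_map_pages(page, 1 << order, 1);
	}
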
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 50f48f0..0f8bb86 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -18,6 +18,15 @@ config DEBUG_STACK_USAGE
This option will slow down process creation somewhat.
+config DEBUG_PAGEALLOC
+ bool "Debug page memory allocations"
+ depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND && PPC32
+ help
+ Unmap pages from the kernel linear mapping after free_pages().
+ This results in a large slowdown, but helps to find certain types
+ of memory corruptions.
+
+
config HCALL_STATS
bool "Hypervisor call instrumentation"
depends on PPC_PSERIES && DEBUG_FS
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 0e53ca8..5fce6cc 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -115,6 +115,10 @@ void MMU_setup(void)
if (strstr(cmd_line, "noltlbs")) {
__map_without_ltlbs = 1;
}
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ __map_without_bats = 1;
+ __map_without_ltlbs = 1;
+#endif
}
/*
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index f75f2fc..8a2fc16 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -451,3 +451,55 @@ exit:
return ret;
}
+#ifdef CONFIG_DEBUG_PAGEALLOC
+
+static int __change_page_attr(struct page *page, pgprot_t prot)
+{
+ pte_t *kpte;
+ pmd_t *kpmd;
+ unsigned long address;
+
+ BUG_ON(PageHighMem(page));
+ address = (unsigned long)page_address(page);
+
+ if (v_mapped_by_bats(address) || v_mapped_by_tlbcam(address))
+ return 0;
+ if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
+ return -EINVAL;
+ set_pte_at(&init_mm, address, kpte, mk_pte(page, prot));
+ wmb();
+ flush_HPTE(0, address, pmd_val(*kpmd));
+ pte_unmap(kpte);
+
+ return 0;
+}
+
+/*
+ * Change the page attributes of a page in the linear mapping.
+ *
+ * THIS CONFLICTS WITH BAT MAPPINGS, DEBUG USE ONLY
+ */
+static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+{
+ int i, err = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ for (i = 0; i < numpages; i++, page++) {
+ err = __change_page_attr(page, prot);
+ if (err)
+ break;
+ }
+ local_irq_restore(flags);
+ return err;
+}
+
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (PageHighMem(page))
+ return;
+
+ change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
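
To illustrate what the new hook buys (a hypothetical test, not part of the
patch): once kernel_map_pages(..., 0) has removed a freed page's PTE from the
linear mapping, a stray access to that page should oops and be caught, instead
of silently reading or corrupting whatever the page holds next.

	#include <linux/gfp.h>

	/* Hypothetical demonstration, not part of the patch. */
	static void debug_pagealloc_demo(void)
	{
		unsigned char *p = (unsigned char *)__get_free_page(GFP_KERNEL);

		if (!p)
			return;

		p[0] = 0xaa;			/* fine: the page is mapped */
		free_page((unsigned long)p);	/* kernel_map_pages(..., 0) unmaps it */

		/* p[0] = 0xbb; */		/* with DEBUG_PAGEALLOC this access
						 * would now fault instead of silently
						 * touching a freed page */
	}
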
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 7cceb2c..0506667 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -85,8 +85,10 @@ unsigned long __init mmu_mapin_ram(void)
unsigned long max_size = (256<<20);
unsigned long align;
- if (__map_without_bats)
+ if (__map_without_bats) {
+ printk(KERN_DEBUG "RAM mapped without BATs\n");
return 0;
+ }
/* Set up BAT2 and if necessary BAT3 to cover RAM. */
diff --git a/include/asm-powerpc/cacheflush.h b/include/asm-powerpc/cacheflush.h
index 08e93e7..ba667a383 100644
--- a/include/asm-powerpc/cacheflush.h
+++ b/include/asm-powerpc/cacheflush.h
@@ -64,6 +64,12 @@ extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
memcpy(dst, src, len)
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+/* internal debugging function */
+void kernel_map_pages(struct page *page, int numpages, int enable);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHEFLUSH_H */
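
One note on the new prototype (a sketch of the surrounding kernel
infrastructure, not something added by this patch): when CONFIG_DEBUG_PAGEALLOC
is not set, generic code of that era provides an inline fallback so callers do
not need their own #ifdefs; the exact upstream form may differ, but it is
roughly:

	/* Rough shape of the generic fallback (sketch only; the real code in
	 * linux/mm.h of that era may do slightly more): with the option
	 * disabled, the hook compiles away. */
	#ifndef CONFIG_DEBUG_PAGEALLOC
	static inline void kernel_map_pages(struct page *page, int numpages, int enable)
	{
	}
	#endif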