author    | Tejun Heo <tj@kernel.org> | 2010-01-05 09:17:33 +0900
committer | Tejun Heo <tj@kernel.org> | 2010-01-05 09:17:33 +0900
commit    | 32032df6c2f6c9c6b2ada2ce42322231824f70c2 (patch)
tree      | b1ce838a37044bb38dfc128e2116ca35630e629a /arch/arm/mm
parent    | 22b737f4c75197372d64afc6ed1bccd58c00e549 (diff)
parent    | c5974b835a909ff15c3b7e6cf6789b5eb919f419 (diff)
Merge branch 'master' into percpu
Conflicts:
arch/powerpc/platforms/pseries/hvCall.S
include/linux/percpu.h
Diffstat (limited to 'arch/arm/mm')
53 files changed, 1216 insertions, 605 deletions
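A recurring change in the diff below is the move from the page-based D-cache flush hook to an explicit (address, size) interface: flush_kern_dcache_page(kaddr) becomes flush_kern_dcache_area(addr, size) in the per-CPU cache backends, and callers such as nommu.c and highmem.c switch from __cpuc_flush_dcache_page() to __cpuc_flush_dcache_area(). The following is a minimal caller-side sketch of the new interface, modelled on those hunks; the wrapper function itself is hypothetical and only illustrates the calling convention.

```c
#include <linux/mm.h>		/* struct page, page_address(), PAGE_SIZE */
#include <asm/cacheflush.h>	/* __cpuc_flush_dcache_area() */

/*
 * Hypothetical helper: write back and invalidate the D-cache lines
 * covering one kernel-mapped page, using the (addr, size) hook this
 * merge introduces instead of the old page-based call.
 */
static void example_flush_one_page(struct page *page)
{
	void *addr = page_address(page);

	/* Old interface: __cpuc_flush_dcache_page(addr); */
	__cpuc_flush_dcache_area(addr, PAGE_SIZE);
}
```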
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 8d43e58..baf6384 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -1,9 +1,5 @@ comment "Processor Type" -config CPU_32 - bool - default y - # Select CPU types depending on the architecture selected. This selects # which CPUs we support in the kernel image, and the compiler instruction # optimiser behaviour. @@ -17,7 +13,7 @@ config CPU_ARM610 select CPU_CP15_MMU select CPU_COPY_V3 if MMU select CPU_TLB_V3 if MMU - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY help The ARM610 is the successor to the ARM3 processor and was produced by VLSI Technology Inc. @@ -31,7 +27,7 @@ config CPU_ARM7TDMI depends on !MMU select CPU_32v4T select CPU_ABRT_LV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4 help A 32-bit RISC microprocessor based on the ARM7 processor core @@ -49,7 +45,7 @@ config CPU_ARM710 select CPU_CP15_MMU select CPU_COPY_V3 if MMU select CPU_TLB_V3 if MMU - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY help A 32-bit RISC microprocessor based on the ARM7 processor core designed by Advanced RISC Machines Ltd. The ARM710 is the @@ -64,7 +60,7 @@ config CPU_ARM720T bool "Support ARM720T processor" if ARCH_INTEGRATOR select CPU_32v4T select CPU_ABRT_LV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4 select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -83,7 +79,7 @@ config CPU_ARM740T depends on !MMU select CPU_32v4T select CPU_ABRT_LV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V3 # although the core is v4t select CPU_CP15_MPU help @@ -100,7 +96,7 @@ config CPU_ARM9TDMI depends on !MMU select CPU_32v4T select CPU_ABRT_NOMMU - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4 help A 32-bit RISC microprocessor based on the ARM9 processor core @@ -114,7 +110,7 @@ config CPU_ARM920T bool "Support ARM920T processor" if ARCH_INTEGRATOR select CPU_32v4T select CPU_ABRT_EV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WT select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -122,10 +118,7 @@ config CPU_ARM920T select CPU_TLB_V4WBI if MMU help The ARM920T is licensed to be produced by numerous vendors, - and is used in the Maverick EP9312 and the Samsung S3C2410. - - More information on the Maverick EP9312 at - <http://linuxdevices.com/products/PD2382866068.html>. + and is used in the Cirrus EP93xx and the Samsung S3C2410. Say Y if you want support for the ARM920T processor. Otherwise, say N. 
@@ -135,7 +128,7 @@ config CPU_ARM922T bool "Support ARM922T processor" if ARCH_INTEGRATOR select CPU_32v4T select CPU_ABRT_EV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WT select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -154,7 +147,7 @@ config CPU_ARM925T bool "Support ARM925T processor" if ARCH_OMAP1 select CPU_32v4T select CPU_ABRT_EV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WT select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -173,7 +166,7 @@ config CPU_ARM926T bool "Support ARM926T processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB select CPU_32v5 select CPU_ABRT_EV5TJ - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_COPY_V4WB if MMU @@ -191,7 +184,7 @@ config CPU_FA526 bool select CPU_32v4 select CPU_ABRT_EV4 - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_CACHE_FA @@ -210,7 +203,7 @@ config CPU_ARM940T depends on !MMU select CPU_32v4T select CPU_ABRT_NOMMU - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MPU help @@ -228,7 +221,7 @@ config CPU_ARM946E depends on !MMU select CPU_32v5 select CPU_ABRT_NOMMU - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MPU help @@ -244,7 +237,7 @@ config CPU_ARM1020 bool "Support ARM1020T (rev 0) processor" if ARCH_INTEGRATOR select CPU_32v5 select CPU_ABRT_EV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WT select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -262,7 +255,7 @@ config CPU_ARM1020E bool "Support ARM1020E processor" if ARCH_INTEGRATOR select CPU_32v5 select CPU_ABRT_EV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WT select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -275,7 +268,7 @@ config CPU_ARM1022 bool "Support ARM1022E processor" if ARCH_INTEGRATOR select CPU_32v5 select CPU_ABRT_EV4T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_COPY_V4WB if MMU # can probably do better @@ -293,7 +286,7 @@ config CPU_ARM1026 bool "Support ARM1026EJ-S processor" if ARCH_INTEGRATOR select CPU_32v5 select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10 - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_COPY_V4WB if MMU # can probably do better @@ -311,7 +304,7 @@ config CPU_SA110 select CPU_32v3 if ARCH_RPC select CPU_32v4 if !ARCH_RPC select CPU_ABRT_EV4 - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WB select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -331,7 +324,7 @@ config CPU_SA1100 bool select CPU_32v4 select CPU_ABRT_EV4 - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_V4WB select CPU_CACHE_VIVT select CPU_CP15_MMU @@ -342,7 +335,7 @@ config CPU_XSCALE bool select CPU_32v5 select CPU_ABRT_EV5T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_TLB_V4WBI if MMU @@ -352,7 +345,7 @@ config CPU_XSC3 bool select CPU_32v5 select CPU_ABRT_EV5T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_TLB_V4WBI if MMU @@ -363,7 +356,7 @@ config CPU_MOHAWK bool select CPU_32v5 select CPU_ABRT_EV5T - select CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_TLB_V4WBI if MMU @@ -374,7 +367,7 @@ config CPU_FEROCEON bool select CPU_32v5 select CPU_ABRT_EV5T - select 
CPU_PABRT_NOIFAR + select CPU_PABRT_LEGACY select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_COPY_FEROCEON if MMU @@ -391,10 +384,10 @@ config CPU_FEROCEON_OLD_ID # ARMv6 config CPU_V6 - bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX + bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE select CPU_32v6 select CPU_ABRT_EV6 - select CPU_PABRT_NOIFAR + select CPU_PABRT_V6 select CPU_CACHE_V6 select CPU_CACHE_VIPT select CPU_CP15_MMU @@ -420,7 +413,7 @@ config CPU_V7 select CPU_32v6K select CPU_32v7 select CPU_ABRT_EV7 - select CPU_PABRT_IFAR + select CPU_PABRT_V7 select CPU_CACHE_V7 select CPU_CACHE_VIPT select CPU_CP15_MMU @@ -482,10 +475,13 @@ config CPU_ABRT_EV6 config CPU_ABRT_EV7 bool -config CPU_PABRT_IFAR +config CPU_PABRT_LEGACY + bool + +config CPU_PABRT_V6 bool -config CPU_PABRT_NOIFAR +config CPU_PABRT_V7 bool # The cache model @@ -764,6 +760,15 @@ config CACHE_L2X0 help This option enables the L2x0 PrimeCell. +config CACHE_TAUROS2 + bool "Enable the Tauros2 L2 cache controller" + depends on ARCH_DOVE + default y + select OUTER_CACHE + help + This option enables the Tauros2 L2 cache controller (as + found on PJ1/PJ4). + config CACHE_XSC3L2 bool "Enable the L2 cache on XScale3" depends on CPU_XSC3 @@ -774,5 +779,5 @@ config CACHE_XSC3L2 config ARM_L1_CACHE_SHIFT int - default 6 if ARCH_OMAP3 + default 6 if ARCH_OMAP3 || ARCH_S5PC1XX default 5 diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 63e3f6d..827e238 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ iomap.o obj-$(CONFIG_MMU) += fault-armv.o flush.o ioremap.o mmap.o \ - pgd.o mmu.o + pgd.o mmu.o vmregion.o ifneq ($(CONFIG_MMU),y) obj-y += nommu.o @@ -27,6 +27,10 @@ obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o obj-$(CONFIG_CPU_ABRT_EV7) += abort-ev7.o +obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o +obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o +obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o + obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o @@ -83,4 +87,4 @@ obj-$(CONFIG_CPU_V7) += proc-v7.o obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o - +obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S index b63a8f7..a89444a 100644 --- a/arch/arm/mm/cache-fa.S +++ b/arch/arm/mm/cache-fa.S @@ -127,15 +127,16 @@ ENTRY(fa_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(kaddr) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure that the data held in the page kaddr is written back * to the page in question. 
* - * - kaddr - kernel address (guaranteed to be page aligned) + * - addr - kernel address + * - size - size of region */ -ENTRY(fa_flush_kern_dcache_page) - add r1, r0, #PAGE_SZ +ENTRY(fa_flush_kern_dcache_area) + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line add r0, r0, #CACHE_DLINESIZE cmp r0, r1 @@ -213,7 +214,7 @@ ENTRY(fa_cache_fns) .long fa_flush_user_cache_range .long fa_coherent_kern_range .long fa_coherent_user_range - .long fa_flush_kern_dcache_page + .long fa_flush_kern_dcache_area .long fa_dma_inv_range .long fa_dma_clean_range .long fa_dma_flush_range diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index b480f1d..cb8fc65 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -28,69 +28,120 @@ static void __iomem *l2x0_base; static DEFINE_SPINLOCK(l2x0_lock); -static inline void sync_writel(unsigned long val, unsigned long reg, - unsigned long complete_mask) +static inline void cache_wait(void __iomem *reg, unsigned long mask) { - unsigned long flags; - - spin_lock_irqsave(&l2x0_lock, flags); - writel(val, l2x0_base + reg); /* wait for the operation to complete */ - while (readl(l2x0_base + reg) & complete_mask) + while (readl(reg) & mask) ; - spin_unlock_irqrestore(&l2x0_lock, flags); } static inline void cache_sync(void) { - sync_writel(0, L2X0_CACHE_SYNC, 1); + void __iomem *base = l2x0_base; + writel(0, base + L2X0_CACHE_SYNC); + cache_wait(base + L2X0_CACHE_SYNC, 1); } static inline void l2x0_inv_all(void) { + unsigned long flags; + /* invalidate all ways */ - sync_writel(0xff, L2X0_INV_WAY, 0xff); + spin_lock_irqsave(&l2x0_lock, flags); + writel(0xff, l2x0_base + L2X0_INV_WAY); + cache_wait(l2x0_base + L2X0_INV_WAY, 0xff); cache_sync(); + spin_unlock_irqrestore(&l2x0_lock, flags); } static void l2x0_inv_range(unsigned long start, unsigned long end) { - unsigned long addr; + void __iomem *base = l2x0_base; + unsigned long flags; + spin_lock_irqsave(&l2x0_lock, flags); if (start & (CACHE_LINE_SIZE - 1)) { start &= ~(CACHE_LINE_SIZE - 1); - sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1); + cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); + writel(start, base + L2X0_CLEAN_INV_LINE_PA); start += CACHE_LINE_SIZE; } if (end & (CACHE_LINE_SIZE - 1)) { end &= ~(CACHE_LINE_SIZE - 1); - sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1); + cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); + writel(end, base + L2X0_CLEAN_INV_LINE_PA); } - for (addr = start; addr < end; addr += CACHE_LINE_SIZE) - sync_writel(addr, L2X0_INV_LINE_PA, 1); + while (start < end) { + unsigned long blk_end = start + min(end - start, 4096UL); + + while (start < blk_end) { + cache_wait(base + L2X0_INV_LINE_PA, 1); + writel(start, base + L2X0_INV_LINE_PA); + start += CACHE_LINE_SIZE; + } + + if (blk_end < end) { + spin_unlock_irqrestore(&l2x0_lock, flags); + spin_lock_irqsave(&l2x0_lock, flags); + } + } + cache_wait(base + L2X0_INV_LINE_PA, 1); cache_sync(); + spin_unlock_irqrestore(&l2x0_lock, flags); } static void l2x0_clean_range(unsigned long start, unsigned long end) { - unsigned long addr; + void __iomem *base = l2x0_base; + unsigned long flags; + spin_lock_irqsave(&l2x0_lock, flags); start &= ~(CACHE_LINE_SIZE - 1); - for (addr = start; addr < end; addr += CACHE_LINE_SIZE) - sync_writel(addr, L2X0_CLEAN_LINE_PA, 1); + while (start < end) { + unsigned long blk_end = start + min(end - start, 4096UL); + + while (start < blk_end) { + cache_wait(base + L2X0_CLEAN_LINE_PA, 1); + writel(start, base + L2X0_CLEAN_LINE_PA); + start += CACHE_LINE_SIZE; + } + + if 
(blk_end < end) { + spin_unlock_irqrestore(&l2x0_lock, flags); + spin_lock_irqsave(&l2x0_lock, flags); + } + } + cache_wait(base + L2X0_CLEAN_LINE_PA, 1); cache_sync(); + spin_unlock_irqrestore(&l2x0_lock, flags); } static void l2x0_flush_range(unsigned long start, unsigned long end) { - unsigned long addr; + void __iomem *base = l2x0_base; + unsigned long flags; + spin_lock_irqsave(&l2x0_lock, flags); start &= ~(CACHE_LINE_SIZE - 1); - for (addr = start; addr < end; addr += CACHE_LINE_SIZE) - sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1); + while (start < end) { + unsigned long blk_end = start + min(end - start, 4096UL); + + while (start < blk_end) { + cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); + writel(start, base + L2X0_CLEAN_INV_LINE_PA); + start += CACHE_LINE_SIZE; + } + + if (blk_end < end) { + spin_unlock_irqrestore(&l2x0_lock, flags); + spin_lock_irqsave(&l2x0_lock, flags); + } + } + cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); cache_sync(); + spin_unlock_irqrestore(&l2x0_lock, flags); } void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) @@ -99,18 +150,25 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) l2x0_base = base; - /* disable L2X0 */ - writel(0, l2x0_base + L2X0_CTRL); + /* + * Check if l2x0 controller is already enabled. + * If you are booting from non-secure mode + * accessing the below registers will fault. + */ + if (!(readl(l2x0_base + L2X0_CTRL) & 1)) { + + /* l2x0 controller is disabled */ - aux = readl(l2x0_base + L2X0_AUX_CTRL); - aux &= aux_mask; - aux |= aux_val; - writel(aux, l2x0_base + L2X0_AUX_CTRL); + aux = readl(l2x0_base + L2X0_AUX_CTRL); + aux &= aux_mask; + aux |= aux_val; + writel(aux, l2x0_base + L2X0_AUX_CTRL); - l2x0_inv_all(); + l2x0_inv_all(); - /* enable L2X0 */ - writel(1, l2x0_base + L2X0_CTRL); + /* enable L2X0 */ + writel(1, l2x0_base + L2X0_CTRL); + } outer_cache.inv_range = l2x0_inv_range; outer_cache.clean_range = l2x0_clean_range; diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c new file mode 100644 index 0000000..5086865 --- /dev/null +++ b/arch/arm/mm/cache-tauros2.c @@ -0,0 +1,263 @@ +/* + * arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support + * + * Copyright (C) 2008 Marvell Semiconductor + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + * References: + * - PJ1 CPU Core Datasheet, + * Document ID MV-S104837-01, Rev 0.7, January 24 2008. + * - PJ4 CPU Core Datasheet, + * Document ID MV-S105190-00, Rev 0.7, March 14 2008. + */ + +#include <linux/init.h> +#include <asm/cacheflush.h> +#include <asm/hardware/cache-tauros2.h> + + +/* + * When Tauros2 is used on a CPU that supports the v7 hierarchical + * cache operations, the cache handling code in proc-v7.S takes care + * of everything, including handling DMA coherency. + * + * So, we only need to register outer cache operations here if we're + * being used on a pre-v7 CPU, and we only need to build support for + * outer cache operations into the kernel image if the kernel has been + * configured to support a pre-v7 CPU. + */ +#if __LINUX_ARM_ARCH__ < 7 +/* + * Low-level cache maintenance operations. 
+ */ +static inline void tauros2_clean_pa(unsigned long addr) +{ + __asm__("mcr p15, 1, %0, c7, c11, 3" : : "r" (addr)); +} + +static inline void tauros2_clean_inv_pa(unsigned long addr) +{ + __asm__("mcr p15, 1, %0, c7, c15, 3" : : "r" (addr)); +} + +static inline void tauros2_inv_pa(unsigned long addr) +{ + __asm__("mcr p15, 1, %0, c7, c7, 3" : : "r" (addr)); +} + + +/* + * Linux primitives. + * + * Note that the end addresses passed to Linux primitives are + * noninclusive. + */ +#define CACHE_LINE_SIZE 32 + +static void tauros2_inv_range(unsigned long start, unsigned long end) +{ + /* + * Clean and invalidate partial first cache line. + */ + if (start & (CACHE_LINE_SIZE - 1)) { + tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1)); + start = (start | (CACHE_LINE_SIZE - 1)) + 1; + } + + /* + * Clean and invalidate partial last cache line. + */ + if (end & (CACHE_LINE_SIZE - 1)) { + tauros2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1)); + end &= ~(CACHE_LINE_SIZE - 1); + } + + /* + * Invalidate all full cache lines between 'start' and 'end'. + */ + while (start < end) { + tauros2_inv_pa(start); + start += CACHE_LINE_SIZE; + } + + dsb(); +} + +static void tauros2_clean_range(unsigned long start, unsigned long end) +{ + start &= ~(CACHE_LINE_SIZE - 1); + while (start < end) { + tauros2_clean_pa(start); + start += CACHE_LINE_SIZE; + } + + dsb(); +} + +static void tauros2_flush_range(unsigned long start, unsigned long end) +{ + start &= ~(CACHE_LINE_SIZE - 1); + while (start < end) { + tauros2_clean_inv_pa(start); + start += CACHE_LINE_SIZE; + } + + dsb(); +} +#endif + +static inline u32 __init read_extra_features(void) +{ + u32 u; + + __asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u)); + + return u; +} + +static inline void __init write_extra_features(u32 u) +{ + __asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u)); +} + +static void __init disable_l2_prefetch(void) +{ + u32 u; + + /* + * Read the CPU Extra Features register and verify that the + * Disable L2 Prefetch bit is set. + */ + u = read_extra_features(); + if (!(u & 0x01000000)) { + printk(KERN_INFO "Tauros2: Disabling L2 prefetch.\n"); + write_extra_features(u | 0x01000000); + } +} + +static inline int __init cpuid_scheme(void) +{ + extern int processor_id; + + return !!((processor_id & 0x000f0000) == 0x000f0000); +} + +static inline u32 __init read_mmfr3(void) +{ + u32 mmfr3; + + __asm__("mrc p15, 0, %0, c0, c1, 7\n" : "=r" (mmfr3)); + + return mmfr3; +} + +static inline u32 __init read_actlr(void) +{ + u32 actlr; + + __asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr)); + + return actlr; +} + +static inline void __init write_actlr(u32 actlr) +{ + __asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr)); +} + +void __init tauros2_init(void) +{ + extern int processor_id; + char *mode; + + disable_l2_prefetch(); + +#ifdef CONFIG_CPU_32v5 + if ((processor_id & 0xff0f0000) == 0x56050000) { + u32 feat; + + /* + * v5 CPUs with Tauros2 have the L2 cache enable bit + * located in the CPU Extra Features register. + */ + feat = read_extra_features(); + if (!(feat & 0x00400000)) { + printk(KERN_INFO "Tauros2: Enabling L2 cache.\n"); + write_extra_features(feat | 0x00400000); + } + + mode = "ARMv5"; + outer_cache.inv_range = tauros2_inv_range; + outer_cache.clean_range = tauros2_clean_range; + outer_cache.flush_range = tauros2_flush_range; + } +#endif + +#ifdef CONFIG_CPU_32v6 + /* + * Check whether this CPU lacks support for the v7 hierarchical + * cache ops. 
(PJ4 is in its v6 personality mode if the MMFR3 + * register indicates no support for the v7 hierarchical cache + * ops.) + */ + if (cpuid_scheme() && (read_mmfr3() & 0xf) == 0) { + /* + * When Tauros2 is used in an ARMv6 system, the L2 + * enable bit is in the ARMv6 ARM-mandated position + * (bit [26] of the System Control Register). + */ + if (!(get_cr() & 0x04000000)) { + printk(KERN_INFO "Tauros2: Enabling L2 cache.\n"); + adjust_cr(0x04000000, 0x04000000); + } + + mode = "ARMv6"; + outer_cache.inv_range = tauros2_inv_range; + outer_cache.clean_range = tauros2_clean_range; + outer_cache.flush_range = tauros2_flush_range; + } +#endif + +#ifdef CONFIG_CPU_32v7 + /* + * Check whether this CPU has support for the v7 hierarchical + * cache ops. (PJ4 is in its v7 personality mode if the MMFR3 + * register indicates support for the v7 hierarchical cache + * ops.) + * + * (Although strictly speaking there may exist CPUs that + * implement the v7 cache ops but are only ARMv6 CPUs (due to + * not complying with all of the other ARMv7 requirements), + * there are no real-life examples of Tauros2 being used on + * such CPUs as of yet.) + */ + if (cpuid_scheme() && (read_mmfr3() & 0xf) == 1) { + u32 actlr; + + /* + * When Tauros2 is used in an ARMv7 system, the L2 + * enable bit is located in the Auxiliary System Control + * Register (which is the only register allowed by the + * ARMv7 spec to contain fine-grained cache control bits). + */ + actlr = read_actlr(); + if (!(actlr & 0x00000002)) { + printk(KERN_INFO "Tauros2: Enabling L2 cache.\n"); + write_actlr(actlr | 0x00000002); + } + + mode = "ARMv7"; + } +#endif + + if (mode == NULL) { + printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n"); + return; + } + + printk(KERN_INFO "Tauros2: L2 cache support initialised " + "in %s mode.\n", mode); +} diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 8a4abeb..2a48273 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S @@ -72,14 +72,15 @@ ENTRY(v3_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *page, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - addr - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(v3_flush_kern_dcache_page) +ENTRY(v3_flush_kern_dcache_area) /* FALLTHROUGH */ /* @@ -129,7 +130,7 @@ ENTRY(v3_cache_fns) .long v3_flush_user_cache_range .long v3_coherent_kern_range .long v3_coherent_user_range - .long v3_flush_kern_dcache_page + .long v3_flush_kern_dcache_area .long v3_dma_inv_range .long v3_dma_clean_range .long v3_dma_flush_range diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index 3668611..5c7da3e 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -82,14 +82,15 @@ ENTRY(v4_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - addr - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(v4_flush_kern_dcache_page) +ENTRY(v4_flush_kern_dcache_area) /* FALLTHROUGH */ /* @@ -141,7 +142,7 @@ ENTRY(v4_cache_fns) .long v4_flush_user_cache_range .long v4_coherent_kern_range .long v4_coherent_user_range - .long v4_flush_kern_dcache_page + .long v4_flush_kern_dcache_area .long v4_dma_inv_range .long v4_dma_clean_range .long v4_dma_flush_range diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S 
index 2ebc1b3..3dbedf1e 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S @@ -114,15 +114,16 @@ ENTRY(v4wb_flush_user_cache_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - addr - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(v4wb_flush_kern_dcache_page) - add r1, r0, #PAGE_SZ +ENTRY(v4wb_flush_kern_dcache_area) + add r1, r0, r1 /* fall through */ /* @@ -224,7 +225,7 @@ ENTRY(v4wb_cache_fns) .long v4wb_flush_user_cache_range .long v4wb_coherent_kern_range .long v4wb_coherent_user_range - .long v4wb_flush_kern_dcache_page + .long v4wb_flush_kern_dcache_area .long v4wb_dma_inv_range .long v4wb_dma_clean_range .long v4wb_dma_flush_range diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index c54fa2c..b3b7410 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S @@ -117,17 +117,18 @@ ENTRY(v4wt_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - addr - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(v4wt_flush_kern_dcache_page) +ENTRY(v4wt_flush_kern_dcache_area) mov r2, #0 mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache - add r1, r0, #PAGE_SZ + add r1, r0, r1 /* fallthrough */ /* @@ -180,7 +181,7 @@ ENTRY(v4wt_cache_fns) .long v4wt_flush_user_cache_range .long v4wt_coherent_kern_range .long v4wt_coherent_user_range - .long v4wt_flush_kern_dcache_page + .long v4wt_flush_kern_dcache_area .long v4wt_dma_inv_range .long v4wt_dma_clean_range .long v4wt_dma_flush_range diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 8f5c13f..4ba0a24 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -12,6 +12,7 @@ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> +#include <asm/unwind.h> #include "proc-macros.S" @@ -121,11 +122,13 @@ ENTRY(v6_coherent_kern_range) * - the Icache does not read data from the write buffer */ ENTRY(v6_coherent_user_range) - + UNWIND(.fnstart ) #ifdef HARVARD_CACHE bic r0, r0, #CACHE_LINE_SIZE - 1 -1: mcr p15, 0, r0, c7, c10, 1 @ clean D line +1: + USER( mcr p15, 0, r0, c7, c10, 1 ) @ clean D line add r0, r0, #CACHE_LINE_SIZE +2: cmp r0, r1 blo 1b #endif @@ -143,15 +146,29 @@ ENTRY(v6_coherent_user_range) mov pc, lr /* - * v6_flush_kern_dcache_page(kaddr) + * Fault handling for the cache operation above. If the virtual address in r0 + * isn't mapped, just try the next page. + */ +9001: + mov r0, r0, lsr #12 + mov r0, r0, lsl #12 + add r0, r0, #4096 + b 2b + UNWIND(.fnend ) +ENDPROC(v6_coherent_user_range) +ENDPROC(v6_coherent_kern_range) + +/* + * v6_flush_kern_dcache_area(void *addr, size_t size) * * Ensure that the data held in the page kaddr is written back * to the page in question. 
* - * - kaddr - kernel address (guaranteed to be page aligned) + * - addr - kernel address + * - size - region size */ -ENTRY(v6_flush_kern_dcache_page) - add r1, r0, #PAGE_SZ +ENTRY(v6_flush_kern_dcache_area) + add r1, r0, r1 1: #ifdef HARVARD_CACHE mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line @@ -255,7 +272,7 @@ ENTRY(v6_cache_fns) .long v6_flush_user_cache_range .long v6_coherent_kern_range .long v6_coherent_user_range - .long v6_flush_kern_dcache_page + .long v6_flush_kern_dcache_area .long v6_dma_inv_range .long v6_dma_clean_range .long v6_dma_flush_range diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index bda0ec3..9073db8 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -13,6 +13,7 @@ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> +#include <asm/unwind.h> #include "proc-macros.S" @@ -153,13 +154,16 @@ ENTRY(v7_coherent_kern_range) * - the Icache does not read data from the write buffer */ ENTRY(v7_coherent_user_range) + UNWIND(.fnstart ) dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 -1: mcr p15, 0, r0, c7, c11, 1 @ clean D line to the point of unification +1: + USER( mcr p15, 0, r0, c7, c11, 1 ) @ clean D line to the point of unification dsb - mcr p15, 0, r0, c7, c5, 1 @ invalidate I line + USER( mcr p15, 0, r0, c7, c5, 1 ) @ invalidate I line add r0, r0, r2 +2: cmp r0, r1 blo 1b mov r0, #0 @@ -167,20 +171,32 @@ ENTRY(v7_coherent_user_range) dsb isb mov pc, lr + +/* + * Fault handling for the cache operation above. If the virtual address in r0 + * isn't mapped, just try the next page. + */ +9001: + mov r0, r0, lsr #12 + mov r0, r0, lsl #12 + add r0, r0, #4096 + b 2b + UNWIND(.fnend ) ENDPROC(v7_coherent_kern_range) ENDPROC(v7_coherent_user_range) /* - * v7_flush_kern_dcache_page(kaddr) + * v7_flush_kern_dcache_area(void *addr, size_t size) * * Ensure that the data held in the page kaddr is written back * to the page in question. 
* - * - kaddr - kernel address (guaranteed to be page aligned) + * - addr - kernel address + * - size - region size */ -ENTRY(v7_flush_kern_dcache_page) +ENTRY(v7_flush_kern_dcache_area) dcache_line_size r2, r3 - add r1, r0, #PAGE_SZ + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line add r0, r0, r2 @@ -188,7 +204,7 @@ ENTRY(v7_flush_kern_dcache_page) blo 1b dsb mov pc, lr -ENDPROC(v7_flush_kern_dcache_page) +ENDPROC(v7_flush_kern_dcache_area) /* * v7_dma_inv_range(start,end) @@ -264,7 +280,7 @@ ENTRY(v7_cache_fns) .long v7_flush_user_cache_range .long v7_coherent_kern_range .long v7_coherent_user_range - .long v7_flush_kern_dcache_page + .long v7_flush_kern_dcache_area .long v7_dma_inv_range .long v7_dma_clean_range .long v7_dma_flush_range diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 6bda76a..a9e22e3 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -50,10 +50,7 @@ void __new_context(struct mm_struct *mm) isb(); flush_tlb_all(); if (icache_is_vivt_asid_tagged()) { - asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n" - "mcr p15, 0, %0, c7, c5, 6 @ flush BTAC/BTB\n" - : - : "r" (0)); + __flush_icache_all(); dsb(); } } diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 4127a7b..0fa1319 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -41,6 +41,14 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, kfrom = kmap_atomic(from, KM_USER0); kto = kmap_atomic(to, KM_USER1); copy_page(kto, kfrom); +#ifdef CONFIG_HIGHMEM + /* + * kmap_atomic() doesn't set the page virtual address, and + * kunmap_atomic() takes care of cache flushing already. + */ + if (page_address(to) != NULL) +#endif + __cpuc_flush_dcache_area(kto, PAGE_SIZE); kunmap_atomic(kto, KM_USER1); kunmap_atomic(kfrom, KM_USER0); } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index b30925f..26325cb 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -63,194 +63,152 @@ static u64 get_coherent_dma_mask(struct device *dev) return mask; } -#ifdef CONFIG_MMU /* - * These are the page tables (2MB each) covering uncached, DMA consistent allocations + * Allocate a DMA buffer for 'dev' of size 'size' using the + * specified gfp mask. Note that 'size' must be page aligned. */ -static pte_t *consistent_pte[NUM_CONSISTENT_PTES]; -static DEFINE_SPINLOCK(consistent_lock); +static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) +{ + unsigned long order = get_order(size); + struct page *page, *p, *e; + void *ptr; + u64 mask = get_coherent_dma_mask(dev); -/* - * VM region handling support. - * - * This should become something generic, handling VM region allocations for - * vmalloc and similar (ioremap, module space, etc). - * - * I envisage vmalloc()'s supporting vm_struct becoming: - * - * struct vm_struct { - * struct vm_region region; - * unsigned long flags; - * struct page **pages; - * unsigned int nr_pages; - * unsigned long phys_addr; - * }; - * - * get_vm_area() would then call vm_region_alloc with an appropriate - * struct vm_region head (eg): - * - * struct vm_region vmalloc_head = { - * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), - * .vm_start = VMALLOC_START, - * .vm_end = VMALLOC_END, - * }; - * - * However, vmalloc_head.vm_start is variable (typically, it is dependent on - * the amount of RAM found at boot time.) I would imagine that get_vm_area() - * would have to initialise this each time prior to calling vm_region_alloc(). 
- */ -struct arm_vm_region { - struct list_head vm_list; - unsigned long vm_start; - unsigned long vm_end; - struct page *vm_pages; - int vm_active; -}; +#ifdef CONFIG_DMA_API_DEBUG + u64 limit = (mask + 1) & ~mask; + if (limit && size >= limit) { + dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n", + size, mask); + return NULL; + } +#endif -static struct arm_vm_region consistent_head = { - .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), - .vm_start = CONSISTENT_BASE, - .vm_end = CONSISTENT_END, -}; + if (!mask) + return NULL; -static struct arm_vm_region * -arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp) -{ - unsigned long addr = head->vm_start, end = head->vm_end - size; - unsigned long flags; - struct arm_vm_region *c, *new; - - new = kmalloc(sizeof(struct arm_vm_region), gfp); - if (!new) - goto out; - - spin_lock_irqsave(&consistent_lock, flags); - - list_for_each_entry(c, &head->vm_list, vm_list) { - if ((addr + size) < addr) - goto nospc; - if ((addr + size) <= c->vm_start) - goto found; - addr = c->vm_end; - if (addr > end) - goto nospc; - } + if (mask < 0xffffffffULL) + gfp |= GFP_DMA; + + page = alloc_pages(gfp, order); + if (!page) + return NULL; - found: /* - * Insert this entry _before_ the one we found. + * Now split the huge page and free the excess pages */ - list_add_tail(&new->vm_list, &c->vm_list); - new->vm_start = addr; - new->vm_end = addr + size; - new->vm_active = 1; - - spin_unlock_irqrestore(&consistent_lock, flags); - return new; - - nospc: - spin_unlock_irqrestore(&consistent_lock, flags); - kfree(new); - out: - return NULL; + split_page(page, order); + for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) + __free_page(p); + + /* + * Ensure that the allocated pages are zeroed, and that any data + * lurking in the kernel direct-mapped region is invalidated. + */ + ptr = page_address(page); + memset(ptr, 0, size); + dmac_flush_range(ptr, ptr + size); + outer_flush_range(__pa(ptr), __pa(ptr) + size); + + return page; } -static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr) +/* + * Free a DMA buffer. 'size' must be page aligned. + */ +static void __dma_free_buffer(struct page *page, size_t size) { - struct arm_vm_region *c; - - list_for_each_entry(c, &head->vm_list, vm_list) { - if (c->vm_active && c->vm_start == addr) - goto out; + struct page *e = page + (size >> PAGE_SHIFT); + + while (page < e) { + __free_page(page); + page++; } - c = NULL; - out: - return c; } +#ifdef CONFIG_MMU +/* + * These are the page tables (2MB each) covering uncached, DMA consistent allocations + */ +static pte_t *consistent_pte[NUM_CONSISTENT_PTES]; + +#include "vmregion.h" + +static struct arm_vmregion_head consistent_head = { + .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock), + .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), + .vm_start = CONSISTENT_BASE, + .vm_end = CONSISTENT_END, +}; + #ifdef CONFIG_HUGETLB_PAGE #error ARM Coherent DMA allocator does not (yet) support huge TLB #endif -static void * -__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, - pgprot_t prot) +/* + * Initialise the consistent memory allocation. 
+ */ +static int __init consistent_init(void) { - struct page *page; - struct arm_vm_region *c; - unsigned long order; - u64 mask = get_coherent_dma_mask(dev); - u64 limit; + int ret = 0; + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int i = 0; + u32 base = CONSISTENT_BASE; - if (!consistent_pte[0]) { - printk(KERN_ERR "%s: not initialised\n", __func__); - dump_stack(); - return NULL; - } + do { + pgd = pgd_offset(&init_mm, base); + pmd = pmd_alloc(&init_mm, pgd, base); + if (!pmd) { + printk(KERN_ERR "%s: no pmd tables\n", __func__); + ret = -ENOMEM; + break; + } + WARN_ON(!pmd_none(*pmd)); - if (!mask) - goto no_page; + pte = pte_alloc_kernel(pmd, base); + if (!pte) { + printk(KERN_ERR "%s: no pte tables\n", __func__); + ret = -ENOMEM; + break; + } - /* - * Sanity check the allocation size. - */ - size = PAGE_ALIGN(size); - limit = (mask + 1) & ~mask; - if ((limit && size >= limit) || - size >= (CONSISTENT_END - CONSISTENT_BASE)) { - printk(KERN_WARNING "coherent allocation too big " - "(requested %#x mask %#llx)\n", size, mask); - goto no_page; - } + consistent_pte[i++] = pte; + base += (1 << PGDIR_SHIFT); + } while (base < CONSISTENT_END); - order = get_order(size); + return ret; +} - if (mask != 0xffffffff) - gfp |= GFP_DMA; +core_initcall(consistent_init); - page = alloc_pages(gfp, order); - if (!page) - goto no_page; +static void * +__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) +{ + struct arm_vmregion *c; - /* - * Invalidate any data that might be lurking in the - * kernel direct-mapped region for device DMA. - */ - { - void *ptr = page_address(page); - memset(ptr, 0, size); - dmac_flush_range(ptr, ptr + size); - outer_flush_range(__pa(ptr), __pa(ptr) + size); + if (!consistent_pte[0]) { + printk(KERN_ERR "%s: not initialised\n", __func__); + dump_stack(); + return NULL; } /* * Allocate a virtual address in the consistent mapping region. */ - c = arm_vm_region_alloc(&consistent_head, size, + c = arm_vmregion_alloc(&consistent_head, size, gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); if (c) { pte_t *pte; - struct page *end = page + (1 << order); int idx = CONSISTENT_PTE_INDEX(c->vm_start); u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); pte = consistent_pte[idx] + off; c->vm_pages = page; - split_page(page, order); - - /* - * Set the "dma handle" - */ - *handle = page_to_dma(dev, page); - do { BUG_ON(!pte_none(*pte)); - /* - * x86 does not mark the pages reserved... - */ - SetPageReserved(page); set_pte_ext(pte, mk_pte(page, prot), 0); page++; pte++; @@ -261,48 +219,90 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, } } while (size -= PAGE_SIZE); - /* - * Free the otherwise unused pages. 
- */ - while (page < end) { - __free_page(page); - page++; - } - return (void *)c->vm_start; } - - if (page) - __free_pages(page, order); - no_page: - *handle = ~0; return NULL; } + +static void __dma_free_remap(void *cpu_addr, size_t size) +{ + struct arm_vmregion *c; + unsigned long addr; + pte_t *ptep; + int idx; + u32 off; + + c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr); + if (!c) { + printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", + __func__, cpu_addr); + dump_stack(); + return; + } + + if ((c->vm_end - c->vm_start) != size) { + printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", + __func__, c->vm_end - c->vm_start, size); + dump_stack(); + size = c->vm_end - c->vm_start; + } + + idx = CONSISTENT_PTE_INDEX(c->vm_start); + off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); + ptep = consistent_pte[idx] + off; + addr = c->vm_start; + do { + pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); + + ptep++; + addr += PAGE_SIZE; + off++; + if (off >= PTRS_PER_PTE) { + off = 0; + ptep = consistent_pte[++idx]; + } + + if (pte_none(pte) || !pte_present(pte)) + printk(KERN_CRIT "%s: bad page in kernel page table\n", + __func__); + } while (size -= PAGE_SIZE); + + flush_tlb_kernel_range(c->vm_start, c->vm_end); + + arm_vmregion_free(&consistent_head, c); +} + #else /* !CONFIG_MMU */ + +#define __dma_alloc_remap(page, size, gfp, prot) page_address(page) +#define __dma_free_remap(addr, size) do { } while (0) + +#endif /* CONFIG_MMU */ + static void * __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, pgprot_t prot) { - void *virt; - u64 mask = get_coherent_dma_mask(dev); + struct page *page; + void *addr; - if (!mask) - goto error; + *handle = ~0; + size = PAGE_ALIGN(size); - if (mask != 0xffffffff) - gfp |= GFP_DMA; - virt = kmalloc(size, gfp); - if (!virt) - goto error; + page = __dma_alloc_buffer(dev, size, gfp); + if (!page) + return NULL; - *handle = virt_to_dma(dev, virt); - return virt; + if (!arch_is_coherent()) + addr = __dma_alloc_remap(page, size, gfp, prot); + else + addr = page_address(page); -error: - *handle = ~0; - return NULL; + if (addr) + *handle = page_to_dma(dev, page); + + return addr; } -#endif /* CONFIG_MMU */ /* * Allocate DMA-coherent memory space and return both the kernel remapped @@ -316,19 +316,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf if (dma_alloc_from_coherent(dev, size, handle, &memory)) return memory; - if (arch_is_coherent()) { - void *virt; - - virt = kmalloc(size, gfp); - if (!virt) - return NULL; - *handle = virt_to_dma(dev, virt); - - return virt; - } - return __dma_alloc(dev, size, handle, gfp, - pgprot_noncached(pgprot_kernel)); + pgprot_dmacoherent(pgprot_kernel)); } EXPORT_SYMBOL(dma_alloc_coherent); @@ -349,15 +338,12 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma, { int ret = -ENXIO; #ifdef CONFIG_MMU - unsigned long flags, user_size, kern_size; - struct arm_vm_region *c; + unsigned long user_size, kern_size; + struct arm_vmregion *c; user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; - spin_lock_irqsave(&consistent_lock, flags); - c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr); - spin_unlock_irqrestore(&consistent_lock, flags); - + c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr); if (c) { unsigned long off = vma->vm_pgoff; @@ -379,7 +365,7 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma, int dma_mmap_coherent(struct device *dev, 
struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size) { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot); return dma_mmap(dev, vma, cpu_addr, dma_addr, size); } EXPORT_SYMBOL(dma_mmap_coherent); @@ -396,144 +382,23 @@ EXPORT_SYMBOL(dma_mmap_writecombine); * free a page as defined by the above mapping. * Must not be called with IRQs disabled. */ -#ifdef CONFIG_MMU void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) { - struct arm_vm_region *c; - unsigned long flags, addr; - pte_t *ptep; - int idx; - u32 off; - WARN_ON(irqs_disabled()); if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) return; - if (arch_is_coherent()) { - kfree(cpu_addr); - return; - } - size = PAGE_ALIGN(size); - spin_lock_irqsave(&consistent_lock, flags); - c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr); - if (!c) - goto no_area; - - c->vm_active = 0; - spin_unlock_irqrestore(&consistent_lock, flags); - - if ((c->vm_end - c->vm_start) != size) { - printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", - __func__, c->vm_end - c->vm_start, size); - dump_stack(); - size = c->vm_end - c->vm_start; - } - - idx = CONSISTENT_PTE_INDEX(c->vm_start); - off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); - ptep = consistent_pte[idx] + off; - addr = c->vm_start; - do { - pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); - unsigned long pfn; - - ptep++; - addr += PAGE_SIZE; - off++; - if (off >= PTRS_PER_PTE) { - off = 0; - ptep = consistent_pte[++idx]; - } - - if (!pte_none(pte) && pte_present(pte)) { - pfn = pte_pfn(pte); - - if (pfn_valid(pfn)) { - struct page *page = pfn_to_page(pfn); - - /* - * x86 does not mark the pages reserved... - */ - ClearPageReserved(page); - - __free_page(page); - continue; - } - } - - printk(KERN_CRIT "%s: bad page in kernel page table\n", - __func__); - } while (size -= PAGE_SIZE); - - flush_tlb_kernel_range(c->vm_start, c->vm_end); - - spin_lock_irqsave(&consistent_lock, flags); - list_del(&c->vm_list); - spin_unlock_irqrestore(&consistent_lock, flags); + if (!arch_is_coherent()) + __dma_free_remap(cpu_addr, size); - kfree(c); - return; - - no_area: - spin_unlock_irqrestore(&consistent_lock, flags); - printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", - __func__, cpu_addr); - dump_stack(); + __dma_free_buffer(dma_to_page(dev, handle), size); } -#else /* !CONFIG_MMU */ -void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) -{ - if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) - return; - kfree(cpu_addr); -} -#endif /* CONFIG_MMU */ EXPORT_SYMBOL(dma_free_coherent); /* - * Initialise the consistent memory allocation. - */ -static int __init consistent_init(void) -{ - int ret = 0; -#ifdef CONFIG_MMU - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte; - int i = 0; - u32 base = CONSISTENT_BASE; - - do { - pgd = pgd_offset(&init_mm, base); - pmd = pmd_alloc(&init_mm, pgd, base); - if (!pmd) { - printk(KERN_ERR "%s: no pmd tables\n", __func__); - ret = -ENOMEM; - break; - } - WARN_ON(!pmd_none(*pmd)); - - pte = pte_alloc_kernel(pmd, base); - if (!pte) { - printk(KERN_ERR "%s: no pte tables\n", __func__); - ret = -ENOMEM; - break; - } - - consistent_pte[i++] = pte; - base += (1 << PGDIR_SHIFT); - } while (base < CONSISTENT_END); -#endif /* !CONFIG_MMU */ - - return ret; -} - -core_initcall(consistent_init); - -/* * Make an area consistent for devices. 
* Note: Drivers should NOT use this function directly, as it will break * platforms with CONFIG_DMABOUNCE. diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index bc0099d..56ee153 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -23,6 +23,8 @@ #include <asm/pgtable.h> #include <asm/tlbflush.h> +#include "mm.h" + static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; /* @@ -151,16 +153,20 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) if (!pfn_valid(pfn)) return; + /* + * The zero page is never written to, so never has any dirty + * cache lines, and therefore never needs to be flushed. + */ page = pfn_to_page(pfn); + if (page == ZERO_PAGE(0)) + return; + mapping = page_mapping(page); - if (mapping) { #ifndef CONFIG_SMP - int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); - - if (dirty) - __flush_dcache_page(mapping, page); + if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) + __flush_dcache_page(mapping, page); #endif - + if (mapping) { if (cache_is_vivt()) make_coherent(mapping, vma, addr, pfn); else if (vma->vm_flags & VM_EXEC) @@ -201,9 +207,8 @@ void __init check_writebuffer_bugs(void) page = alloc_page(GFP_KERNEL); if (page) { unsigned long *p1, *p2; - pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG| - L_PTE_DIRTY|L_PTE_WRITE| - L_PTE_MT_BUFFERABLE); + pgprot_t prot = __pgprot_modify(PAGE_KERNEL, + L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE); p1 = vmap(&page, 1, VM_IOREMAP, prot); p2 = vmap(&page, 1, VM_IOREMAP, prot); diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 379f785..10e0680 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -292,6 +292,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) * down_read() */ might_sleep(); +#ifdef CONFIG_DEBUG_VM + if (!user_mode(regs) && + !search_exception_tables(regs->ARM_pc)) + goto no_context; +#endif } fault = __do_page_fault(mm, addr, fsr, tsk); @@ -519,9 +524,58 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) arm_notify_die("", regs, &info, fsr, 0); } + +static struct fsr_info ifsr_info[] = { + { do_bad, SIGBUS, 0, "unknown 0" }, + { do_bad, SIGBUS, 0, "unknown 1" }, + { do_bad, SIGBUS, 0, "debug event" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" }, + { do_bad, SIGBUS, 0, "unknown 4" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" }, + { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, + { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, + { do_bad, SIGBUS, 0, "unknown 10" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, + { do_bad, SIGBUS, 0, "external abort on translation" }, + { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, + { do_bad, SIGBUS, 0, "external abort on translation" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, + { do_bad, SIGBUS, 0, "unknown 16" }, + { do_bad, SIGBUS, 0, "unknown 17" }, + { do_bad, SIGBUS, 0, "unknown 18" }, + { do_bad, SIGBUS, 0, "unknown 19" }, + { do_bad, SIGBUS, 0, "unknown 20" }, + { do_bad, SIGBUS, 0, "unknown 21" }, + { do_bad, SIGBUS, 0, "unknown 22" }, + { do_bad, SIGBUS, 0, "unknown 23" }, + { do_bad, SIGBUS, 0, "unknown 24" }, + { do_bad, SIGBUS, 0, "unknown 25" }, + { do_bad, SIGBUS, 0, "unknown 26" }, + { do_bad, SIGBUS, 0, "unknown 27" }, + { do_bad, SIGBUS, 0, "unknown 28" }, 
+ { do_bad, SIGBUS, 0, "unknown 29" }, + { do_bad, SIGBUS, 0, "unknown 30" }, + { do_bad, SIGBUS, 0, "unknown 31" }, +}; + asmlinkage void __exception -do_PrefetchAbort(unsigned long addr, struct pt_regs *regs) +do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) { - do_translation_fault(addr, FSR_LNX_PF, regs); + const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr); + struct siginfo info; + + if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) + return; + + printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", + inf->name, ifsr, addr); + + info.si_signo = inf->sig; + info.si_errno = 0; + info.si_code = inf->code; + info.si_addr = (void __user *)addr; + arm_notify_die("", regs, &info, ifsr, 0); } diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index b279429..6f3a4b7 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -18,10 +18,6 @@ #include "mm.h" -#ifdef CONFIG_ARM_ERRATA_411920 -extern void v6_icache_inval_all(void); -#endif - #ifdef CONFIG_CPU_CACHE_VIPT #define ALIAS_FLUSH_START 0xffff4000 @@ -35,77 +31,61 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) flush_tlb_kernel_page(to); asm( "mcrr p15, 0, %1, %0, c14\n" - " mcr p15, 0, %2, c7, c10, 4\n" -#ifndef CONFIG_ARM_ERRATA_411920 - " mcr p15, 0, %2, c7, c5, 0\n" -#endif + " mcr p15, 0, %2, c7, c10, 4" : : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero) : "cc"); -#ifdef CONFIG_ARM_ERRATA_411920 - v6_icache_inval_all(); -#endif } void flush_cache_mm(struct mm_struct *mm) { if (cache_is_vivt()) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) - __cpuc_flush_user_all(); + vivt_flush_cache_mm(mm); return; } if (cache_is_vipt_aliasing()) { asm( "mcr p15, 0, %0, c7, c14, 0\n" - " mcr p15, 0, %0, c7, c10, 4\n" -#ifndef CONFIG_ARM_ERRATA_411920 - " mcr p15, 0, %0, c7, c5, 0\n" -#endif + " mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "cc"); -#ifdef CONFIG_ARM_ERRATA_411920 - v6_icache_inval_all(); -#endif } } void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (cache_is_vivt()) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) - __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), - vma->vm_flags); + vivt_flush_cache_range(vma, start, end); return; } if (cache_is_vipt_aliasing()) { asm( "mcr p15, 0, %0, c7, c14, 0\n" - " mcr p15, 0, %0, c7, c10, 4\n" -#ifndef CONFIG_ARM_ERRATA_411920 - " mcr p15, 0, %0, c7, c5, 0\n" -#endif + " mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "cc"); -#ifdef CONFIG_ARM_ERRATA_411920 - v6_icache_inval_all(); -#endif } + + if (vma->vm_flags & VM_EXEC) + __flush_icache_all(); } void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) { if (cache_is_vivt()) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - unsigned long addr = user_addr & PAGE_MASK; - __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); - } + vivt_flush_cache_page(vma, user_addr, pfn); return; } - if (cache_is_vipt_aliasing()) + if (cache_is_vipt_aliasing()) { flush_pfn_alias(pfn, user_addr); + __flush_icache_all(); + } + + if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) + __flush_icache_all(); } void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, @@ -113,15 +93,13 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, unsigned long len, int write) { if (cache_is_vivt()) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - unsigned long addr = 
(unsigned long)kaddr; - __cpuc_coherent_kern_range(addr, addr + len); - } + vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write); return; } if (cache_is_vipt_aliasing()) { flush_pfn_alias(page_to_pfn(page), uaddr); + __flush_icache_all(); return; } @@ -139,6 +117,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, void __flush_dcache_page(struct address_space *mapping, struct page *page) { + void *addr = page_address(page); + /* * Writeback any data associated with the kernel mapping of this * page. This ensures that data in the physical page is mutually @@ -149,9 +129,9 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) * kmap_atomic() doesn't set the page virtual address, and * kunmap_atomic() takes care of cache flushing already. */ - if (page_address(page)) + if (addr) #endif - __cpuc_flush_dcache_page(page_address(page)); + __cpuc_flush_dcache_area(addr, PAGE_SIZE); /* * If this is a page cache page, and we have an aliasing VIPT cache, @@ -215,7 +195,16 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p */ void flush_dcache_page(struct page *page) { - struct address_space *mapping = page_mapping(page); + struct address_space *mapping; + + /* + * The zero page is never written to, so never has any dirty + * cache lines, and therefore never needs to be flushed. + */ + if (page == ZERO_PAGE(0)) + return; + + mapping = page_mapping(page); #ifndef CONFIG_SMP if (!PageHighMem(page) && mapping && !mapping_mapped(mapping)) @@ -261,6 +250,7 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l * userspace address only. */ flush_pfn_alias(pfn, vmaddr); + __flush_icache_all(); } /* @@ -268,5 +258,5 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l * in this mapping of the page. FIXME: this is overkill * since we actually ask for a write-back and invalidate. 
*/ - __cpuc_flush_dcache_page(page_address(page)); + __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); } diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 73cae57..2be1ec7 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -46,6 +46,8 @@ void *kmap_atomic(struct page *page, enum km_type type) if (!PageHighMem(page)) return page_address(page); + debug_kmap_atomic(type); + kmap = kmap_high_get(page); if (kmap) return kmap; @@ -77,7 +79,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type) unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); if (kvaddr >= (void *)FIXADDR_START) { - __cpuc_flush_dcache_page((void *)vaddr); + __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 877c492..52c40d1 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -273,7 +273,6 @@ static void __init bootmem_init_node(int node, struct meminfo *mi, struct membank *bank = &mi->bank[i]; if (!bank->highmem) free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank)); - memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank)); } /* @@ -370,6 +369,19 @@ int pfn_valid(unsigned long pfn) return 0; } EXPORT_SYMBOL(pfn_valid); + +static void arm_memory_present(struct meminfo *mi, int node) +{ +} +#else +static void arm_memory_present(struct meminfo *mi, int node) +{ + int i; + for_each_nodebank(i, mi, node) { + struct membank *bank = &mi->bank[i]; + memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank)); + } +} #endif static int __init meminfo_cmp(const void *_a, const void *_b) @@ -427,6 +439,12 @@ void __init bootmem_init(void) */ if (node == initrd_node) bootmem_reserve_initrd(node); + + /* + * Sparsemem tries to allocate bootmem in memory_present(), + * so must be done after the fixed reservations + */ + arm_memory_present(mi, node); } /* @@ -483,7 +501,7 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn) /* * Convert start_pfn/end_pfn to a struct page pointer. */ - start_pg = pfn_to_page(start_pfn); + start_pg = pfn_to_page(start_pfn - 1) + 1; end_pg = pfn_to_page(end_pfn); /* diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index c4f6f05..a888363 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -24,6 +24,8 @@ struct mem_type { const struct mem_type *get_mem_type(unsigned int type); +extern void __flush_dcache_page(struct address_space *mapping, struct page *page); + #endif struct map_desc; diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index f7457fe..f5abc51 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -54,7 +54,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, * We enforce the MAP_FIXED case. 
*/ if (flags & MAP_FIXED) { - if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1)) + if (aliasing && flags & MAP_SHARED && + (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) return -EINVAL; return addr; } @@ -124,7 +125,7 @@ int valid_phys_addr_range(unsigned long addr, size_t size) { if (addr < PHYS_OFFSET) return 0; - if (addr + size >= __pa(high_memory - 1)) + if (addr + size > __pa(high_memory - 1) + 1) return 0; return 1; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4426ee6..1708da8 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -21,6 +21,7 @@ #include <asm/cachetype.h> #include <asm/setup.h> #include <asm/sizes.h> +#include <asm/smp_plat.h> #include <asm/tlb.h> #include <asm/highmem.h> @@ -116,6 +117,13 @@ static void __init early_cachepolicy(char **p) } if (i == ARRAY_SIZE(cache_policies)) printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n"); + /* + * This restriction is partly to do with the way we boot; it is + * unpredictable to have memory mapped using two different sets of + * memory attributes (shared, type, and cache attribs). We can not + * change these attributes once the initial assembly has setup the + * page tables. + */ if (cpu_architecture() >= CPU_ARCH_ARMv6) { printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n"); cachepolicy = CPOLICY_WRITEBACK; @@ -445,8 +453,7 @@ static void __init build_mem_type_table(void) pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | - L_PTE_DIRTY | L_PTE_WRITE | - L_PTE_EXEC | kern_pgprot); + L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot); mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; @@ -709,10 +716,6 @@ static void __init sanity_check_meminfo(void) if (meminfo.nr_banks >= NR_BANKS) { printk(KERN_CRIT "NR_BANKS too low, " "ignoring high memory\n"); - } else if (cache_is_vipt_aliasing()) { - printk(KERN_CRIT "HIGHMEM is not yet supported " - "with VIPT aliasing cache, " - "ignoring high memory\n"); } else { memmove(bank + 1, bank, (meminfo.nr_banks - i) * sizeof(*bank)); @@ -726,6 +729,8 @@ static void __init sanity_check_meminfo(void) bank->size = VMALLOC_MIN - __va(bank->start); } #else + bank->highmem = highmem; + /* * Check whether this memory bank would entirely overlap * the vmalloc area. @@ -754,6 +759,38 @@ static void __init sanity_check_meminfo(void) #endif j++; } +#ifdef CONFIG_HIGHMEM + if (highmem) { + const char *reason = NULL; + + if (cache_is_vipt_aliasing()) { + /* + * Interactions between kmap and other mappings + * make highmem support with aliasing VIPT caches + * rather difficult. 
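The arch_get_unmapped_area() change near the top of the mmap.c hunk above relaxes the MAP_FIXED check for shared mappings on aliasing caches: instead of requiring the address itself to be SHMLBA-aligned, it only requires the address and the file offset to land on the same cache colour. A sketch of that predicate follows, with the SHMLBA and PAGE_SHIFT values assumed for illustration.

#include <stdbool.h>

#define PAGE_SHIFT 12u
#define SHMLBA     (4u << PAGE_SHIFT)   /* assumed: 4 pages, the usual value for aliasing VIPT ARM caches */

/*
 * Models the MAP_FIXED test in arch_get_unmapped_area(): a fixed shared
 * mapping is acceptable as long as the requested address and the file
 * offset fall on the same cache colour, so every mapping of a given file
 * page aliases to the same D-cache colour.
 */
static bool shared_fixed_addr_ok(unsigned long addr, unsigned long pgoff)
{
        return ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) == 0;
}

For example, addr 0x40003000 with pgoff 3 now passes (0x40003000 - 0x3000 is SHMLBA-aligned), whereas the old test rejected any address that was not itself a multiple of SHMLBA.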
+ */ + reason = "with VIPT aliasing cache"; +#ifdef CONFIG_SMP + } else if (tlb_ops_need_broadcast()) { + /* + * kmap_high needs to occasionally flush TLB entries, + * however, if the TLB entries need to be broadcast + * we may deadlock: + * kmap_high(irqs off)->flush_all_zero_pkmaps-> + * flush_tlb_kernel_range->smp_call_function_many + * (must not be called with irqs off) + */ + reason = "without hardware TLB ops broadcasting"; +#endif + } + if (reason) { + printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", + reason); + while (j > 0 && meminfo.bank[j - 1].highmem) + j--; + } + } +#endif meminfo.nr_banks = j; } @@ -843,7 +880,7 @@ void __init reserve_node_zero(pg_data_t *pgdat) BOOTMEM_EXCLUSIVE); } - if (machine_is_treo680()) { + if (machine_is_treo680() || machine_is_centro()) { reserve_bootmem_node(pgdat, 0xa0000000, 0x1000, BOOTMEM_EXCLUSIVE); reserve_bootmem_node(pgdat, 0xa2000000, 0x1000, @@ -998,7 +1035,7 @@ void __init paging_init(struct machine_desc *mdesc) */ zero_page = alloc_bootmem_low_pages(PAGE_SIZE); empty_zero_page = virt_to_page(zero_page); - flush_dcache_page(empty_zero_page); + __flush_dcache_page(NULL, empty_zero_page); } /* diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 900811c..374a831 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -61,7 +61,7 @@ void setup_mm_for_reboot(char mode) void flush_dcache_page(struct page *page) { - __cpuc_flush_dcache_page(page_address(page)); + __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); } EXPORT_SYMBOL(flush_dcache_page); diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S new file mode 100644 index 0000000..87970eb --- /dev/null +++ b/arch/arm/mm/pabort-legacy.S @@ -0,0 +1,19 @@ +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * Function: legacy_pabort + * + * Params : r0 = address of aborted instruction + * + * Returns : r0 = address of abort + * : r1 = Simulated IFSR with section translation fault status + * + * Purpose : obtain information about current prefetch abort. + */ + + .align 5 +ENTRY(legacy_pabort) + mov r1, #5 + mov pc, lr +ENDPROC(legacy_pabort) diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S new file mode 100644 index 0000000..06e3d1e --- /dev/null +++ b/arch/arm/mm/pabort-v6.S @@ -0,0 +1,19 @@ +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * Function: v6_pabort + * + * Params : r0 = address of aborted instruction + * + * Returns : r0 = address of abort + * : r1 = IFSR + * + * Purpose : obtain information about current prefetch abort. + */ + + .align 5 +ENTRY(v6_pabort) + mrc p15, 0, r1, c5, c0, 1 @ get IFSR + mov pc, lr +ENDPROC(v6_pabort) diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S new file mode 100644 index 0000000..a8b3b30 --- /dev/null +++ b/arch/arm/mm/pabort-v7.S @@ -0,0 +1,20 @@ +#include <linux/linkage.h> +#include <asm/assembler.h> + +/* + * Function: v6_pabort + * + * Params : r0 = address of aborted instruction + * + * Returns : r0 = address of abort + * : r1 = IFSR + * + * Purpose : obtain information about current prefetch abort. 
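The sanity_check_meminfo() additions above refuse highmem either on aliasing VIPT caches or on SMP systems whose TLB maintenance must be broadcast in software, and then drop the trailing highmem banks by shrinking the bank count. Below is a compact model of that trimming step; struct bank_model and the sample values are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct bank_model {
        unsigned long start;
        unsigned long size;
        bool highmem;
};

/*
 * Models the trimming in sanity_check_meminfo(): when highmem cannot be
 * used, the highmem banks (which end up at the tail of the array) are
 * dropped simply by reducing the bank count.
 */
static unsigned int drop_highmem_banks(const struct bank_model *bank, unsigned int nr)
{
        while (nr > 0 && bank[nr - 1].highmem)
                nr--;
        return nr;
}

int main(void)
{
        struct bank_model banks[] = {
                { 0x00000000, 0x10000000, false },
                { 0x10000000, 0x10000000, true  },
                { 0x20000000, 0x10000000, true  },
        };

        printf("banks kept: %u\n", drop_highmem_banks(banks, 3));  /* prints 1 */
        return 0;
}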
+ */ + + .align 5 +ENTRY(v7_pabort) + mrc p15, 0, r0, c6, c0, 2 @ get IFAR + mrc p15, 0, r1, c5, c0, 1 @ get IFSR + mov pc, lr +ENDPROC(v7_pabort) diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index b5551bf..8012e24 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -231,17 +231,18 @@ ENTRY(arm1020_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - page - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(arm1020_flush_kern_dcache_page) +ENTRY(arm1020_flush_kern_dcache_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE - add r1, r0, #PAGE_SZ + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB add r0, r0, #CACHE_DLINESIZE @@ -335,7 +336,7 @@ ENTRY(arm1020_cache_fns) .long arm1020_flush_user_cache_range .long arm1020_coherent_kern_range .long arm1020_coherent_user_range - .long arm1020_flush_kern_dcache_page + .long arm1020_flush_kern_dcache_area .long arm1020_dma_inv_range .long arm1020_dma_clean_range .long arm1020_dma_flush_range @@ -449,7 +450,7 @@ arm1020_crval: .type arm1020_processor_functions, #object arm1020_processor_functions: .word v4t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm1020_proc_init .word cpu_arm1020_proc_fin .word cpu_arm1020_reset diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 8bc6740..41fe25d 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -225,17 +225,18 @@ ENTRY(arm1020e_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - page - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(arm1020e_flush_kern_dcache_page) +ENTRY(arm1020e_flush_kern_dcache_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE - add r1, r0, #PAGE_SZ + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 @@ -321,7 +322,7 @@ ENTRY(arm1020e_cache_fns) .long arm1020e_flush_user_cache_range .long arm1020e_coherent_kern_range .long arm1020e_coherent_user_range - .long arm1020e_flush_kern_dcache_page + .long arm1020e_flush_kern_dcache_area .long arm1020e_dma_inv_range .long arm1020e_dma_clean_range .long arm1020e_dma_flush_range @@ -430,7 +431,7 @@ arm1020e_crval: .type arm1020e_processor_functions, #object arm1020e_processor_functions: .word v4t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm1020e_proc_init .word cpu_arm1020e_proc_fin .word cpu_arm1020e_reset diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 2cd03e6..20a5b1b 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -214,17 +214,18 @@ ENTRY(arm1022_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - page - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(arm1022_flush_kern_dcache_page) +ENTRY(arm1022_flush_kern_dcache_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE - add r1, r0, #PAGE_SZ + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 
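The new pabort-legacy.S, pabort-v6.S and pabort-v7.S files, together with the .word legacy_pabort / v6_pabort / v7_pabort entries in the processor_functions tables, replace the old pabort_noifar/pabort_ifar pair with per-architecture prefetch-abort helpers; CPUs without an IFSR get a simulated section-translation-fault status. A rough C model of that dispatch is below; the structure names and the sample IFSR value are assumptions, and the real code does this in assembly on the exception path.

#include <stdio.h>

struct pabort_info {
        unsigned long addr;   /* address of the aborted instruction (or IFAR on v7) */
        unsigned long ifsr;   /* fault status, real or simulated */
};

/* legacy CPUs have no IFSR register: fabricate a "section translation fault" status */
static struct pabort_info legacy_pabort_model(unsigned long pc)
{
        return (struct pabort_info){ .addr = pc, .ifsr = 5 };
}

/* v6 and later read the real IFSR; modelled here with a fixed sample value */
static struct pabort_info v6_pabort_model(unsigned long pc)
{
        unsigned long ifsr = 0x0d;   /* stand-in for "mrc p15, 0, r1, c5, c0, 1" */

        return (struct pabort_info){ .addr = pc, .ifsr = ifsr };
}

/* the per-CPU processor_functions table carries one of the handlers above */
struct proc_fns_model {
        struct pabort_info (*pabort)(unsigned long pc);
};

int main(void)
{
        struct proc_fns_model arm920 = { .pabort = legacy_pabort_model };
        struct proc_fns_model armv6  = { .pabort = v6_pabort_model };

        printf("legacy ifsr=%lu, v6 ifsr=%#lx\n",
               arm920.pabort(0x8000).ifsr, armv6.pabort(0x8000).ifsr);
        return 0;
}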
add r0, r0, #CACHE_DLINESIZE cmp r0, r1 @@ -310,7 +311,7 @@ ENTRY(arm1022_cache_fns) .long arm1022_flush_user_cache_range .long arm1022_coherent_kern_range .long arm1022_coherent_user_range - .long arm1022_flush_kern_dcache_page + .long arm1022_flush_kern_dcache_area .long arm1022_dma_inv_range .long arm1022_dma_clean_range .long arm1022_dma_flush_range @@ -413,7 +414,7 @@ arm1022_crval: .type arm1022_processor_functions, #object arm1022_processor_functions: .word v4t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm1022_proc_init .word cpu_arm1022_proc_fin .word cpu_arm1022_reset diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index ad961a8..96aedb1 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -208,17 +208,18 @@ ENTRY(arm1026_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - page - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(arm1026_flush_kern_dcache_page) +ENTRY(arm1026_flush_kern_dcache_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE - add r1, r0, #PAGE_SZ + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 @@ -304,7 +305,7 @@ ENTRY(arm1026_cache_fns) .long arm1026_flush_user_cache_range .long arm1026_coherent_kern_range .long arm1026_coherent_user_range - .long arm1026_flush_kern_dcache_page + .long arm1026_flush_kern_dcache_area .long arm1026_dma_inv_range .long arm1026_dma_clean_range .long arm1026_dma_flush_range @@ -408,7 +409,7 @@ arm1026_crval: .type arm1026_processor_functions, #object arm1026_processor_functions: .word v5t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm1026_proc_init .word cpu_arm1026_proc_fin .word cpu_arm1026_reset diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 80d6e1d..3f9cd3d 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -278,7 +278,7 @@ __arm7_setup: mov r0, #0 .type arm6_processor_functions, #object ENTRY(arm6_processor_functions) .word cpu_arm6_data_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm6_proc_init .word cpu_arm6_proc_fin .word cpu_arm6_reset @@ -295,7 +295,7 @@ ENTRY(arm6_processor_functions) .type arm7_processor_functions, #object ENTRY(arm7_processor_functions) .word cpu_arm7_data_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm7_proc_init .word cpu_arm7_proc_fin .word cpu_arm7_reset diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 85ae186..0b62de2 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S @@ -181,7 +181,7 @@ arm720_crval: .type arm720_processor_functions, #object ENTRY(arm720_processor_functions) .word v4t_late_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm720_proc_init .word cpu_arm720_proc_fin .word cpu_arm720_reset diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 4f95bee..01860cd 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -126,7 +126,7 @@ __arm740_setup: .type arm740_processor_functions, #object ENTRY(arm740_processor_functions) .word v4t_late_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm740_proc_init .word cpu_arm740_proc_fin .word cpu_arm740_reset diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 93e05fa..1201b98 100644 --- 
a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S @@ -64,7 +64,7 @@ __arm7tdmi_setup: .type arm7tdmi_processor_functions, #object ENTRY(arm7tdmi_processor_functions) .word v4t_late_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm7tdmi_proc_init .word cpu_arm7tdmi_proc_fin .word cpu_arm7tdmi_reset diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 914d688..471669e 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -207,15 +207,16 @@ ENTRY(arm920_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - addr - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(arm920_flush_kern_dcache_page) - add r1, r0, #PAGE_SZ +ENTRY(arm920_flush_kern_dcache_area) + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 @@ -293,7 +294,7 @@ ENTRY(arm920_cache_fns) .long arm920_flush_user_cache_range .long arm920_coherent_kern_range .long arm920_coherent_user_range - .long arm920_flush_kern_dcache_page + .long arm920_flush_kern_dcache_area .long arm920_dma_inv_range .long arm920_dma_clean_range .long arm920_dma_flush_range @@ -395,7 +396,7 @@ arm920_crval: .type arm920_processor_functions, #object arm920_processor_functions: .word v4t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm920_proc_init .word cpu_arm920_proc_fin .word cpu_arm920_reset diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 51c9c98..ee111b0 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -209,15 +209,16 @@ ENTRY(arm922_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - addr - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(arm922_flush_kern_dcache_page) - add r1, r0, #PAGE_SZ +ENTRY(arm922_flush_kern_dcache_area) + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 @@ -295,7 +296,7 @@ ENTRY(arm922_cache_fns) .long arm922_flush_user_cache_range .long arm922_coherent_kern_range .long arm922_coherent_user_range - .long arm922_flush_kern_dcache_page + .long arm922_flush_kern_dcache_area .long arm922_dma_inv_range .long arm922_dma_clean_range .long arm922_dma_flush_range @@ -399,7 +400,7 @@ arm922_crval: .type arm922_processor_functions, #object arm922_processor_functions: .word v4t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm922_proc_init .word cpu_arm922_proc_fin .word cpu_arm922_reset diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 2724526..8deb5bd 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -251,15 +251,16 @@ ENTRY(arm925_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - addr - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(arm925_flush_kern_dcache_page) - add r1, r0, #PAGE_SZ +ENTRY(arm925_flush_kern_dcache_area) + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 @@ -346,7 
+347,7 @@ ENTRY(arm925_cache_fns) .long arm925_flush_user_cache_range .long arm925_coherent_kern_range .long arm925_coherent_user_range - .long arm925_flush_kern_dcache_page + .long arm925_flush_kern_dcache_area .long arm925_dma_inv_range .long arm925_dma_clean_range .long arm925_dma_flush_range @@ -462,7 +463,7 @@ arm925_crval: .type arm925_processor_functions, #object arm925_processor_functions: .word v4t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm925_proc_init .word cpu_arm925_proc_fin .word cpu_arm925_reset diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 5446693..64db6e2 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -214,15 +214,16 @@ ENTRY(arm926_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - addr - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(arm926_flush_kern_dcache_page) - add r1, r0, #PAGE_SZ +ENTRY(arm926_flush_kern_dcache_area) + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 @@ -309,7 +310,7 @@ ENTRY(arm926_cache_fns) .long arm926_flush_user_cache_range .long arm926_coherent_kern_range .long arm926_coherent_user_range - .long arm926_flush_kern_dcache_page + .long arm926_flush_kern_dcache_area .long arm926_dma_inv_range .long arm926_dma_clean_range .long arm926_dma_flush_range @@ -415,7 +416,7 @@ arm926_crval: .type arm926_processor_functions, #object arm926_processor_functions: .word v5tj_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm926_proc_init .word cpu_arm926_proc_fin .word cpu_arm926_reset diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index f595117..8196b9f 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -141,14 +141,15 @@ ENTRY(arm940_coherent_user_range) /* FALLTHROUGH */ /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - addr - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(arm940_flush_kern_dcache_page) +ENTRY(arm940_flush_kern_dcache_area) mov ip, #0 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries @@ -238,7 +239,7 @@ ENTRY(arm940_cache_fns) .long arm940_flush_user_cache_range .long arm940_coherent_kern_range .long arm940_coherent_user_range - .long arm940_flush_kern_dcache_page + .long arm940_flush_kern_dcache_area .long arm940_dma_inv_range .long arm940_dma_clean_range .long arm940_dma_flush_range @@ -322,7 +323,7 @@ __arm940_setup: .type arm940_processor_functions, #object ENTRY(arm940_processor_functions) .word nommu_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm940_proc_init .word cpu_arm940_proc_fin .word cpu_arm940_reset diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index e03f6ff..9a95123 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -183,16 +183,17 @@ ENTRY(arm946_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - addr - page aligned address + * - addr - kernel address + * - size - region size * (same as arm926) 
*/ -ENTRY(arm946_flush_kern_dcache_page) - add r1, r0, #PAGE_SZ +ENTRY(arm946_flush_kern_dcache_area) + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 @@ -280,7 +281,7 @@ ENTRY(arm946_cache_fns) .long arm946_flush_user_cache_range .long arm946_coherent_kern_range .long arm946_coherent_user_range - .long arm946_flush_kern_dcache_page + .long arm946_flush_kern_dcache_area .long arm946_dma_inv_range .long arm946_dma_clean_range .long arm946_dma_flush_range @@ -377,7 +378,7 @@ __arm946_setup: .type arm946_processor_functions, #object ENTRY(arm946_processor_functions) .word nommu_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm946_proc_init .word cpu_arm946_proc_fin .word cpu_arm946_reset diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index be6c11d..28545c2 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S @@ -64,7 +64,7 @@ __arm9tdmi_setup: .type arm9tdmi_processor_functions, #object ENTRY(arm9tdmi_processor_functions) .word nommu_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_arm9tdmi_proc_init .word cpu_arm9tdmi_proc_fin .word cpu_arm9tdmi_reset diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index 08b8a95..08f5ac2 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S @@ -191,7 +191,7 @@ fa526_cr1_set: .type fa526_processor_functions, #object fa526_processor_functions: .word v4_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_fa526_proc_init .word cpu_fa526_proc_fin .word cpu_fa526_reset diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index 0fe1f8f..dbc3938 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -226,16 +226,17 @@ ENTRY(feroceon_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - addr - page aligned address + * - addr - kernel address + * - size - region size */ .align 5 -ENTRY(feroceon_flush_kern_dcache_page) - add r1, r0, #PAGE_SZ +ENTRY(feroceon_flush_kern_dcache_area) + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 @@ -246,7 +247,7 @@ ENTRY(feroceon_flush_kern_dcache_page) mov pc, lr .align 5 -ENTRY(feroceon_range_flush_kern_dcache_page) +ENTRY(feroceon_range_flush_kern_dcache_area) mrs r2, cpsr add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive orr r3, r2, #PSR_I_BIT @@ -372,7 +373,7 @@ ENTRY(feroceon_cache_fns) .long feroceon_flush_user_cache_range .long feroceon_coherent_kern_range .long feroceon_coherent_user_range - .long feroceon_flush_kern_dcache_page + .long feroceon_flush_kern_dcache_area .long feroceon_dma_inv_range .long feroceon_dma_clean_range .long feroceon_dma_flush_range @@ -383,7 +384,7 @@ ENTRY(feroceon_range_cache_fns) .long feroceon_flush_user_cache_range .long feroceon_coherent_kern_range .long feroceon_coherent_user_range - .long feroceon_range_flush_kern_dcache_page + .long feroceon_range_flush_kern_dcache_area .long feroceon_range_dma_inv_range .long feroceon_range_dma_clean_range .long feroceon_range_dma_flush_range @@ -499,7 +500,7 @@ feroceon_crval: .type feroceon_processor_functions, #object feroceon_processor_functions: .word v5t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_feroceon_proc_init .word cpu_feroceon_proc_fin .word cpu_feroceon_reset 
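All of the proc-*.S hunks above rename the flush_kern_dcache_page slot of the per-CPU cache operations table to flush_kern_dcache_area and give it a size argument. Roughly, the ".long" lists correspond to a table like the sketch below; the field names follow the kernel's struct cpu_cache_fns, but the exact layout and the dispatch helper are only illustrative.

#include <stddef.h>

/*
 * Rough C view of the per-CPU cache operations table that the ".long"
 * lists in the proc-*.S files populate.
 */
struct cpu_cache_fns_model {
        void (*flush_kern_all)(void);
        void (*flush_user_all)(void);
        void (*flush_user_range)(unsigned long start, unsigned long end, unsigned int flags);
        void (*coherent_kern_range)(unsigned long start, unsigned long end);
        void (*coherent_user_range)(unsigned long start, unsigned long end);
        void (*flush_kern_dcache_area)(void *addr, size_t size);  /* was flush_kern_dcache_page(void *) */
        void (*dma_inv_range)(const void *start, const void *end);
        void (*dma_clean_range)(const void *start, const void *end);
        void (*dma_flush_range)(const void *start, const void *end);
};

/* on a multi-CPU kernel, generic code dispatches through one global table */
static struct cpu_cache_fns_model cpu_cache_model;

static void flush_dcache_area_dispatch(void *addr, size_t size)
{
        if (cpu_cache_model.flush_kern_dcache_area)
                cpu_cache_model.flush_kern_dcache_area(addr, size);
}

static void noop_flush(void *addr, size_t size) { (void)addr; (void)size; }

int main(void)
{
        cpu_cache_model.flush_kern_dcache_area = noop_flush;
        flush_dcache_area_dispatch((void *)0, 4096);
        return 0;
}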
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 540f507..9674d36 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -186,15 +186,16 @@ ENTRY(mohawk_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - addr - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(mohawk_flush_kern_dcache_page) - add r1, r0, #PAGE_SZ +ENTRY(mohawk_flush_kern_dcache_area) + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 @@ -273,7 +274,7 @@ ENTRY(mohawk_cache_fns) .long mohawk_flush_user_cache_range .long mohawk_coherent_kern_range .long mohawk_coherent_user_range - .long mohawk_flush_kern_dcache_page + .long mohawk_flush_kern_dcache_area .long mohawk_dma_inv_range .long mohawk_dma_clean_range .long mohawk_dma_flush_range @@ -359,7 +360,7 @@ mohawk_crval: .type mohawk_processor_functions, #object mohawk_processor_functions: .word v5t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_mohawk_proc_init .word cpu_mohawk_proc_fin .word cpu_mohawk_reset diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 90a7e52..7b706b3 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S @@ -199,7 +199,7 @@ sa110_crval: .type sa110_processor_functions, #object ENTRY(sa110_processor_functions) .word v4_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_sa110_proc_init .word cpu_sa110_proc_fin .word cpu_sa110_reset diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 451e2d9..ee77002 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -214,7 +214,7 @@ sa1100_crval: .type sa1100_processor_functions, #object ENTRY(sa1100_processor_functions) .word v4_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_sa1100_proc_init .word cpu_sa1100_proc_fin .word cpu_sa1100_reset diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index ac5c800..3e6210b 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c @@ -27,8 +27,7 @@ EXPORT_SYMBOL(__cpuc_flush_kern_all); EXPORT_SYMBOL(__cpuc_flush_user_all); EXPORT_SYMBOL(__cpuc_flush_user_range); EXPORT_SYMBOL(__cpuc_coherent_kern_range); -EXPORT_SYMBOL(__cpuc_flush_dcache_page); -EXPORT_SYMBOL(dmac_inv_range); /* because of flush_ioremap_region() */ +EXPORT_SYMBOL(__cpuc_flush_dcache_area); #else EXPORT_SYMBOL(cpu_cache); #endif diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 524ddae..395cc90 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -32,8 +32,10 @@ #ifndef CONFIG_SMP #define TTB_FLAGS TTB_RGN_WBWA +#define PMD_FLAGS PMD_SECT_WB #else #define TTB_FLAGS TTB_RGN_WBWA|TTB_S +#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S #endif ENTRY(cpu_v6_proc_init) @@ -128,9 +130,16 @@ ENTRY(cpu_v6_set_pte_ext) - + .type cpu_v6_name, #object cpu_v6_name: .asciz "ARMv6-compatible processor" + .size cpu_v6_name, . - cpu_v6_name + + .type cpu_pj4_name, #object +cpu_pj4_name: + .asciz "Marvell PJ4 processor" + .size cpu_pj4_name, . 
- cpu_pj4_name + .align __INIT @@ -191,7 +200,7 @@ v6_crval: .type v6_processor_functions, #object ENTRY(v6_processor_functions) .word v6_early_abort - .word pabort_noifar + .word v6_pabort .word cpu_v6_proc_init .word cpu_v6_proc_fin .word cpu_v6_reset @@ -222,10 +231,9 @@ __v6_proc_info: .long 0x0007b000 .long 0x0007f000 .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ + PMD_SECT_AP_READ | \ + PMD_FLAGS .long PMD_TYPE_SECT | \ PMD_SECT_XN | \ PMD_SECT_AP_WRITE | \ @@ -240,3 +248,26 @@ __v6_proc_info: .long v6_user_fns .long v6_cache_fns .size __v6_proc_info, . - __v6_proc_info + + .type __pj4_v6_proc_info, #object +__pj4_v6_proc_info: + .long 0x560f5810 + .long 0xff0ffff0 + .long PMD_TYPE_SECT | \ + PMD_SECT_AP_WRITE | \ + PMD_SECT_AP_READ | \ + PMD_FLAGS + .long PMD_TYPE_SECT | \ + PMD_SECT_XN | \ + PMD_SECT_AP_WRITE | \ + PMD_SECT_AP_READ + b __v6_setup + .long cpu_arch_name + .long cpu_elf_name + .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP + .long cpu_pj4_name + .long v6_processor_functions + .long v6wbi_tlb_fns + .long v6_user_fns + .long v6_cache_fns + .size __pj4_v6_proc_info, . - __pj4_v6_proc_info diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index f3fa1c3..3a28521 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -33,9 +33,11 @@ #ifndef CONFIG_SMP /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ #define TTB_FLAGS TTB_IRGN_WB|TTB_RGN_OC_WB +#define PMD_FLAGS PMD_SECT_WB #else /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ #define TTB_FLAGS TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA +#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S #endif ENTRY(cpu_v7_proc_init) @@ -184,9 +186,10 @@ cpu_v7_name: */ __v7_setup: #ifdef CONFIG_SMP - mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode and - orr r0, r0, #(1 << 6) | (1 << 0) @ TLB ops broadcasting - mcr p15, 0, r0, c1, c0, 1 + mrc p15, 0, r0, c1, c0, 1 + tst r0, #(1 << 6) @ SMP/nAMP mode enabled? + orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and + mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting #endif adr r12, __v7_setup_stack @ the local stack stmia r12, {r0-r5, r7, r9, r11, lr} @@ -295,7 +298,7 @@ __v7_setup_stack: .type v7_processor_functions, #object ENTRY(v7_processor_functions) .word v7_early_abort - .word pabort_ifar + .word v7_pabort .word cpu_v7_proc_init .word cpu_v7_proc_fin .word cpu_v7_reset @@ -326,10 +329,9 @@ __v7_proc_info: .long 0x000f0000 @ Required ID value .long 0x000f0000 @ Mask for ID .long PMD_TYPE_SECT | \ - PMD_SECT_BUFFERABLE | \ - PMD_SECT_CACHEABLE | \ PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ + PMD_SECT_AP_READ | \ + PMD_FLAGS .long PMD_TYPE_SECT | \ PMD_SECT_XN | \ PMD_SECT_AP_WRITE | \ diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 33515c2..96456f5 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -226,15 +226,16 @@ ENTRY(xsc3_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache. 
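The __v7_setup change above stops unconditionally rewriting the auxiliary control register: the SMP/nAMP and TLB-broadcast bits are only set when SMP mode is not already enabled. A small model of that read-test-write sequence follows; the bit names and the register stand-ins are assumptions for illustration.

#include <stdint.h>

#define ACTLR_SMP (1u << 6)   /* SMP/nAMP mode bit */
#define ACTLR_FW  (1u << 0)   /* broadcast TLB/cache maintenance bit */

static uint32_t actlr_model;  /* stands in for the CP15 auxiliary control register */

static uint32_t read_actlr_model(void)          { return actlr_model; }   /* mrc p15, 0, r0, c1, c0, 1 */
static void     write_actlr_model(uint32_t val) { actlr_model = val; }    /* mcr p15, 0, r0, c1, c0, 1 */

/*
 * Models the reworked __v7_setup sequence: only touch the auxiliary
 * control register when SMP mode is not already enabled, instead of
 * unconditionally rewriting it on every boot.
 */
static void v7_enable_smp_model(void)
{
        uint32_t actlr = read_actlr_model();

        if (!(actlr & ACTLR_SMP))
                write_actlr_model(actlr | ACTLR_SMP | ACTLR_FW);
}

int main(void)
{
        v7_enable_smp_model();
        return (actlr_model & ACTLR_SMP) ? 0 : 1;
}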
* - * - addr - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(xsc3_flush_kern_dcache_page) - add r1, r0, #PAGE_SZ +ENTRY(xsc3_flush_kern_dcache_area) + add r1, r0, r1 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line add r0, r0, #CACHELINESIZE cmp r0, r1 @@ -309,7 +310,7 @@ ENTRY(xsc3_cache_fns) .long xsc3_flush_user_cache_range .long xsc3_coherent_kern_range .long xsc3_coherent_user_range - .long xsc3_flush_kern_dcache_page + .long xsc3_flush_kern_dcache_area .long xsc3_dma_inv_range .long xsc3_dma_clean_range .long xsc3_dma_flush_range @@ -396,7 +397,7 @@ __xsc3_setup: orr r4, r4, #0x18 @ cache the page table in L2 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer - mov r0, #0 @ don't allow CP access + mov r0, #1 << 6 @ cp6 access for early sched_clock mcr p15, 0, r0, c15, c1, 0 @ write CP access register mrc p15, 0, r0, c1, c0, 1 @ get auxiliary control reg @@ -428,7 +429,7 @@ xsc3_crval: .type xsc3_processor_functions, #object ENTRY(xsc3_processor_functions) .word v5t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_xsc3_proc_init .word cpu_xsc3_proc_fin .word cpu_xsc3_reset diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 4233942..93df472 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -284,15 +284,16 @@ ENTRY(xscale_coherent_user_range) mov pc, lr /* - * flush_kern_dcache_page(void *page) + * flush_kern_dcache_area(void *addr, size_t size) * * Ensure no D cache aliasing occurs, either with itself or * the I cache * - * - addr - page aligned address + * - addr - kernel address + * - size - region size */ -ENTRY(xscale_flush_kern_dcache_page) - add r1, r0, #PAGE_SZ +ENTRY(xscale_flush_kern_dcache_area) + add r1, r0, r1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHELINESIZE @@ -368,7 +369,7 @@ ENTRY(xscale_cache_fns) .long xscale_flush_user_cache_range .long xscale_coherent_kern_range .long xscale_coherent_user_range - .long xscale_flush_kern_dcache_page + .long xscale_flush_kern_dcache_area .long xscale_dma_inv_range .long xscale_dma_clean_range .long xscale_dma_flush_range @@ -392,7 +393,7 @@ ENTRY(xscale_80200_A0_A1_cache_fns) .long xscale_flush_user_cache_range .long xscale_coherent_kern_range .long xscale_coherent_user_range - .long xscale_flush_kern_dcache_page + .long xscale_flush_kern_dcache_area .long xscale_dma_flush_range .long xscale_dma_clean_range .long xscale_dma_flush_range @@ -511,7 +512,7 @@ xscale_crval: .type xscale_processor_functions, #object ENTRY(xscale_processor_functions) .word v5t_early_abort - .word pabort_noifar + .word legacy_pabort .word cpu_xscale_proc_init .word cpu_xscale_proc_fin .word cpu_xscale_reset diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c new file mode 100644 index 0000000..19e09bdb --- /dev/null +++ b/arch/arm/mm/vmregion.c @@ -0,0 +1,131 @@ +#include <linux/spinlock.h> +#include <linux/list.h> +#include <linux/slab.h> + +#include "vmregion.h" + +/* + * VM region handling support. + * + * This should become something generic, handling VM region allocations for + * vmalloc and similar (ioremap, module space, etc). 
+ * + * I envisage vmalloc()'s supporting vm_struct becoming: + * + * struct vm_struct { + * struct vmregion region; + * unsigned long flags; + * struct page **pages; + * unsigned int nr_pages; + * unsigned long phys_addr; + * }; + * + * get_vm_area() would then call vmregion_alloc with an appropriate + * struct vmregion head (eg): + * + * struct vmregion vmalloc_head = { + * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), + * .vm_start = VMALLOC_START, + * .vm_end = VMALLOC_END, + * }; + * + * However, vmalloc_head.vm_start is variable (typically, it is dependent on + * the amount of RAM found at boot time.) I would imagine that get_vm_area() + * would have to initialise this each time prior to calling vmregion_alloc(). + */ + +struct arm_vmregion * +arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp) +{ + unsigned long addr = head->vm_start, end = head->vm_end - size; + unsigned long flags; + struct arm_vmregion *c, *new; + + if (head->vm_end - head->vm_start < size) { + printk(KERN_WARNING "%s: allocation too big (requested %#x)\n", + __func__, size); + goto out; + } + + new = kmalloc(sizeof(struct arm_vmregion), gfp); + if (!new) + goto out; + + spin_lock_irqsave(&head->vm_lock, flags); + + list_for_each_entry(c, &head->vm_list, vm_list) { + if ((addr + size) < addr) + goto nospc; + if ((addr + size) <= c->vm_start) + goto found; + addr = c->vm_end; + if (addr > end) + goto nospc; + } + + found: + /* + * Insert this entry _before_ the one we found. + */ + list_add_tail(&new->vm_list, &c->vm_list); + new->vm_start = addr; + new->vm_end = addr + size; + new->vm_active = 1; + + spin_unlock_irqrestore(&head->vm_lock, flags); + return new; + + nospc: + spin_unlock_irqrestore(&head->vm_lock, flags); + kfree(new); + out: + return NULL; +} + +static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) +{ + struct arm_vmregion *c; + + list_for_each_entry(c, &head->vm_list, vm_list) { + if (c->vm_active && c->vm_start == addr) + goto out; + } + c = NULL; + out: + return c; +} + +struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) +{ + struct arm_vmregion *c; + unsigned long flags; + + spin_lock_irqsave(&head->vm_lock, flags); + c = __arm_vmregion_find(head, addr); + spin_unlock_irqrestore(&head->vm_lock, flags); + return c; +} + +struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr) +{ + struct arm_vmregion *c; + unsigned long flags; + + spin_lock_irqsave(&head->vm_lock, flags); + c = __arm_vmregion_find(head, addr); + if (c) + c->vm_active = 0; + spin_unlock_irqrestore(&head->vm_lock, flags); + return c; +} + +void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c) +{ + unsigned long flags; + + spin_lock_irqsave(&head->vm_lock, flags); + list_del(&c->vm_list); + spin_unlock_irqrestore(&head->vm_lock, flags); + + kfree(c); +} diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h new file mode 100644 index 0000000..6b2cdbd --- /dev/null +++ b/arch/arm/mm/vmregion.h @@ -0,0 +1,29 @@ +#ifndef VMREGION_H +#define VMREGION_H + +#include <linux/spinlock.h> +#include <linux/list.h> + +struct page; + +struct arm_vmregion_head { + spinlock_t vm_lock; + struct list_head vm_list; + unsigned long vm_start; + unsigned long vm_end; +}; + +struct arm_vmregion { + struct list_head vm_list; + unsigned long vm_start; + unsigned long vm_end; + struct page *vm_pages; + int vm_active; +}; + +struct arm_vmregion *arm_vmregion_alloc(struct 
arm_vmregion_head *, size_t, gfp_t);
+struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
+struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
+void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
+
+#endif
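The new vmregion.c above is a small first-fit allocator: regions are kept on an address-sorted list between vm_start and vm_end, and arm_vmregion_alloc() walks the list for the first gap large enough for the request. The user-space model below captures that search and the sorted insert; locking, the vm_active flag, gfp handling and the overflow check of the real code are omitted, and all names here are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

struct region_model {
        unsigned long start, end;
        struct region_model *next;
};

struct region_head_model {
        unsigned long start, end;       /* bounds of the whole arena */
        struct region_model *first;     /* allocations, sorted by address */
};

/* first-fit search, as in arm_vmregion_alloc(): try the gap before each
 * existing region, otherwise continue after it */
static struct region_model *region_alloc_model(struct region_head_model *head,
                                               unsigned long size)
{
        unsigned long addr = head->start;
        struct region_model **link = &head->first, *c, *new;

        if (head->end - head->start < size)
                return NULL;

        for (c = head->first; c; link = &c->next, c = c->next) {
                if (addr + size <= c->start)
                        break;                  /* the gap before this region fits */
                addr = c->end;                  /* otherwise try after it */
        }
        if (addr + size > head->end)
                return NULL;

        new = malloc(sizeof(*new));
        if (!new)
                return NULL;
        new->start = addr;
        new->end = addr + size;
        new->next = c;
        *link = new;                            /* insert before the region we stopped at */
        return new;
}

int main(void)
{
        struct region_head_model head = { 0x1000, 0x9000, NULL };
        struct region_model *a = region_alloc_model(&head, 0x2000);
        struct region_model *b = region_alloc_model(&head, 0x1000);

        if (a && b)
                printf("a: %#lx-%#lx  b: %#lx-%#lx\n", a->start, a->end, b->start, b->end);
        return 0;
}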