Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        |  26
-rw-r--r--  arch/arm/mm/abort-ev7.S    |  21
-rw-r--r--  arch/arm/mm/alignment.c    |  53
-rw-r--r--  arch/arm/mm/cache-l2x0.c   |  39
-rw-r--r--  arch/arm/mm/copypage-fa.c  |   2
-rw-r--r--  arch/arm/mm/fault-armv.c   |   2
-rw-r--r--  arch/arm/mm/fault.c        |   5
-rw-r--r--  arch/arm/mm/init.c         |  34
-rw-r--r--  arch/arm/mm/mm.h           |   3
-rw-r--r--  arch/arm/mm/mmu.c          |  39

10 files changed, 149 insertions(+), 75 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5bd7c89..346ae14 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -572,6 +572,8 @@ config CPU_TLB_V6
config CPU_TLB_V7
bool
+config VERIFY_PERMISSION_FAULT
+ bool
endif
config CPU_HAS_ASID
@@ -760,7 +762,8 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
config CACHE_L2X0
bool "Enable the L2x0 outer cache controller"
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
- REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
+ REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \
+ ARCH_NOMADIK || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
default y
select OUTER_CACHE
select OUTER_CACHE_SYNC
@@ -769,7 +772,7 @@ config CACHE_L2X0
config CACHE_TAUROS2
bool "Enable the Tauros2 L2 cache controller"
- depends on ARCH_DOVE
+ depends on (ARCH_DOVE || ARCH_MMP)
default y
select OUTER_CACHE
help
@@ -789,6 +792,25 @@ config ARM_L1_CACHE_SHIFT
default 6 if ARM_L1_CACHE_SHIFT_6
default 5
+config ARM_DMA_MEM_BUFFERABLE
+ bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+ default y if CPU_V6 || CPU_V7
+ help
+ Historically, the kernel has used strongly ordered mappings to
+ provide DMA coherent memory. With the advent of ARMv7, mapping
+ memory with differing types results in unpredictable behaviour,
+ so on these CPUs, this option is forced on.
+
+ Multiple mappings with differing attributes are also unpredictable
+ on ARMv6 CPUs, but since they do not have aggressive speculative
+ prefetch, no harm appears to occur.
+
+ However, drivers may be missing the necessary barriers for ARMv6,
+ and turning this on may therefore result in unpredictable driver
+ behaviour. For that reason, this is offered as an option.
+
+ You are recommended to say 'Y' here and debug any affected drivers.
+
config ARCH_HAS_BARRIERS
bool
help
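The ARM_DMA_MEM_BUFFERABLE help text above has a concrete driver-side consequence: once DMA coherent memory is mapped Normal non-cacheable instead of strongly ordered, CPU stores to a descriptor can be reordered past a subsequent MMIO doorbell write unless the driver inserts an explicit barrier. Below is a minimal sketch of the pattern to audit; the my_desc layout, my_start_dma() and the OWN flag are hypothetical, while wmb()/writel() are the stock kernel primitives:

#include <linux/types.h>
#include <linux/io.h>

struct my_desc {			/* hypothetical DMA descriptor */
	u32 addr;
	u32 len;
	u32 flags;			/* bit 0: hypothetical OWN bit */
};

static void my_start_dma(struct my_desc *desc, void __iomem *doorbell,
			 dma_addr_t buf, size_t len)
{
	desc->addr  = buf;
	desc->len   = len;
	desc->flags = 1;		/* hand the descriptor to the device */

	/*
	 * On a strongly ordered mapping the stores above could not pass
	 * the MMIO write below; on a bufferable mapping they can, so the
	 * write barrier is mandatory before kicking the device.
	 */
	wmb();
	writel(1, doorbell);
}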
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 2e6dc04..ec88b15 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -29,5 +29,26 @@ ENTRY(v7_early_abort)
* V6 code adjusts the returned DFSR.
* New designs should not need to patch up faults.
*/
+
+#if defined(CONFIG_VERIFY_PERMISSION_FAULT)
+ /*
+ * Detect erroneous permission failures and fix
+ */
+ ldr r3, =0x40d @ On permission fault
+ and r3, r1, r3
+ cmp r3, #0x0d
+ movne pc, lr
+
+ mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR
+ isb
+ mrc p15, 0, r2, c7, c4, 0 @ Read the PAR
+ and r3, r2, #0x7b @ On translation fault
+ cmp r3, #0x0b
+ movne pc, lr
+ bic r1, r1, #0xf @ Fix up FSR FS[5:0]
+ and r2, r2, #0x7e
+ orr r1, r1, r2, LSR #1
+#endif
+
mov pc, lr
ENDPROC(v7_early_abort)
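The block added above filters out spurious permission faults: when the DFSR carries a permission-fault encoding, the handler retranslates the faulting address with ATS1CPR and, if that privileged translation walk itself reports a translation fault, substitutes the walk's fault status into the FSR handed to the generic fault code. An illustrative C rendering of the same logic, assuming fsr/far hold DFSR/DFAR (the real check must stay in the assembly abort veneer):

static inline unsigned int fixup_perm_fault(unsigned int fsr,
					    unsigned long far)
{
	unsigned int par;

	/* Act only on permission-fault encodings (section or page). */
	if ((fsr & 0x40d) != 0x00d)
		return fsr;

	/* Retranslate the faulting address (ATS1CPR), then read the PAR. */
	asm volatile("mcr p15, 0, %0, c7, c8, 0" : : "r" (far));
	asm volatile("isb" : : : "memory");
	asm volatile("mrc p15, 0, %0, c7, c4, 0" : "=r" (par));

	/* Keep the original FSR unless the walk saw a translation fault. */
	if ((par & 0x7b) != 0x0b)
		return fsr;

	/* Replace FS[3:0] with the fault status reported in PAR[6:1]. */
	return (fsr & ~0xfU) | ((par & 0x7e) >> 1);
}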
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index a2ab51f..6f98c35 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
@@ -94,36 +95,29 @@ static const char *usermode_action[] = {
"signal+warn"
};
-static int
-proc_alignment_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int alignment_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- int len;
-
- p += sprintf(p, "User:\t\t%lu\n", ai_user);
- p += sprintf(p, "System:\t\t%lu\n", ai_sys);
- p += sprintf(p, "Skipped:\t%lu\n", ai_skipped);
- p += sprintf(p, "Half:\t\t%lu\n", ai_half);
- p += sprintf(p, "Word:\t\t%lu\n", ai_word);
+ seq_printf(m, "User:\t\t%lu\n", ai_user);
+ seq_printf(m, "System:\t\t%lu\n", ai_sys);
+ seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
+ seq_printf(m, "Half:\t\t%lu\n", ai_half);
+ seq_printf(m, "Word:\t\t%lu\n", ai_word);
if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
- p += sprintf(p, "DWord:\t\t%lu\n", ai_dword);
- p += sprintf(p, "Multi:\t\t%lu\n", ai_multi);
- p += sprintf(p, "User faults:\t%i (%s)\n", ai_usermode,
+ seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
+ seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
+ seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
usermode_action[ai_usermode]);
- len = (p - page) - off;
- if (len < 0)
- len = 0;
-
- *eof = (len <= count) ? 1 : 0;
- *start = page + off;
+ return 0;
+}
- return len;
+static int alignment_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, alignment_proc_show, NULL);
}
-static int proc_alignment_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
char mode;
@@ -136,6 +130,13 @@ static int proc_alignment_write(struct file *file, const char __user *buffer,
return count;
}
+static const struct file_operations alignment_proc_fops = {
+ .open = alignment_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = alignment_proc_write,
+};
#endif /* CONFIG_PROC_FS */
union offset_union {
@@ -901,12 +902,10 @@ static int __init alignment_init(void)
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;
- res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL);
+ res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
+ &alignment_proc_fops);
if (!res)
return -ENOMEM;
-
- res->read_proc = proc_alignment_read;
- res->write_proc = proc_alignment_write;
#endif
/*
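The alignment.c change is the standard conversion from the old read_proc/write_proc hooks to the seq_file interface. For reference, the minimal shape of that pattern for a hypothetical /proc/foo entry, using the same era's API (all foo_* names are made up):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int foo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "value:\t%d\n", 42);	/* emit the whole file */
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_show, NULL);
}

static const struct file_operations foo_fops = {
	.open		= foo_open,
	.read		= seq_read,	/* seq_file handles buffering/offsets */
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init foo_init(void)
{
	return proc_create("foo", 0444, NULL, &foo_fops) ? 0 : -ENOMEM;
}

This removes the error-prone page/offset arithmetic that the old read_proc callbacks had to do by hand, which is exactly the block deleted above.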
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 21ad68b..9819869 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -27,6 +27,7 @@
static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
+static uint32_t l2x0_way_mask; /* Bitmask of active ways */
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
@@ -108,8 +109,8 @@ static inline void l2x0_inv_all(void)
/* invalidate all ways */
spin_lock_irqsave(&l2x0_lock, flags);
- writel(0xff, l2x0_base + L2X0_INV_WAY);
- cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
+ writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+ cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
@@ -208,9 +209,37 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
__u32 aux;
+ __u32 cache_id;
+ int ways;
+ const char *type;
l2x0_base = base;
+ cache_id = readl(l2x0_base + L2X0_CACHE_ID);
+ aux = readl(l2x0_base + L2X0_AUX_CTRL);
+
+ /* Determine the number of ways */
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+ case L2X0_CACHE_ID_PART_L310:
+ if (aux & (1 << 16))
+ ways = 16;
+ else
+ ways = 8;
+ type = "L310";
+ break;
+ case L2X0_CACHE_ID_PART_L210:
+ ways = (aux >> 13) & 0xf;
+ type = "L210";
+ break;
+ default:
+ /* Assume unknown chips have 8 ways */
+ ways = 8;
+ type = "L2x0 series";
+ break;
+ }
+
+ l2x0_way_mask = (1 << ways) - 1;
+
/*
* Check if l2x0 controller is already enabled.
* If you are booting from non-secure mode
@@ -219,8 +248,6 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
/* l2x0 controller is disabled */
-
- aux = readl(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
writel(aux, l2x0_base + L2X0_AUX_CTRL);
@@ -236,5 +263,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
outer_cache.flush_range = l2x0_flush_range;
outer_cache.sync = l2x0_cache_sync;
- printk(KERN_INFO "L2X0 cache controller enabled\n");
+ printk(KERN_INFO "%s cache controller enabled\n", type);
+ printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
+ ways, cache_id, aux);
}
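The way mask matters because L2x0 background operations are driven per way: software writes a bitmask of ways to the operation register and the controller clears each bit as that way completes, so the hard-coded 0xff silently skipped the upper eight ways of a 16-way L310. A sketch of the pattern, mirroring cache_wait() in this file (reg_off would be e.g. L2X0_INV_WAY):

static void l2x0_way_op(void __iomem *base, unsigned long reg_off,
			u32 way_mask)
{
	/* Start the operation on every active way at once. */
	writel(way_mask, base + reg_off);

	/* The controller clears each bit when that way is done. */
	while (readl(base + reg_off) & way_mask)
		cpu_relax();
}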
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index b2a6008..d2852e1 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -40,7 +40,7 @@ fa_copy_user_page(void *kto, const void *kfrom)
}
void fa_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 0d414c2..9b906de 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -134,8 +134,6 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
flush_dcache_mmap_unlock(mapping);
if (aliases)
do_adjust_pte(vma, addr, pfn, ptep);
- else
- flush_cache_page(vma, addr, pfn);
}
/*
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 9d40c34..92f5801 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -463,7 +463,12 @@ static struct fsr_info {
{ do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGKILL, 0, "terminal exception" },
{ do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
+/* Do we need a runtime check? */
+#if __LINUX_ARM_ARCH__ < 6
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
+#else
+{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "I-cache maintenance fault" },
+#endif
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
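The fault.c change can be a pure table edit because of how faults are dispatched: do_DataAbort() in this file indexes fsr_info with FS[3:0] from the low bits of the FSR and FS[4] from bit 10, so entry 4 (FS = 0b00100) is the slot that ARMv6 redefined from "external abort on linefetch" to the I-cache maintenance fault. A sketch of that indexing (as used by the dispatch in this era's fault.c):

static const struct fsr_info *fsr_lookup(unsigned int fsr)
{
	/* FS[3:0] in bits 3:0, FS[4] in bit 10: a 32-entry table. */
	return fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
}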
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 0ed29bf..1ba6cf5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,7 +15,6 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
-#include <linux/sort.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
@@ -224,20 +223,6 @@ static int __init check_initrd(struct meminfo *mi)
return initrd_node;
}
-static inline void map_memory_bank(struct membank *bank)
-{
-#ifdef CONFIG_MMU
- struct map_desc map;
-
- map.pfn = bank_pfn_start(bank);
- map.virtual = __phys_to_virt(bank_phys_start(bank));
- map.length = bank_phys_size(bank);
- map.type = MT_MEMORY;
-
- create_mapping(&map);
-#endif
-}
-
static void __init bootmem_init_node(int node, struct meminfo *mi,
unsigned long start_pfn, unsigned long end_pfn)
{
@@ -247,16 +232,6 @@ static void __init bootmem_init_node(int node, struct meminfo *mi,
int i;
/*
- * Map the memory banks for this node.
- */
- for_each_nodebank(i, mi, node) {
- struct membank *bank = &mi->bank[i];
-
- if (!bank->highmem)
- map_memory_bank(bank);
- }
-
- /*
* Allocate the bootmem bitmap page.
*/
boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
@@ -385,21 +360,12 @@ static void arm_memory_present(struct meminfo *mi, int node)
}
#endif
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
- const struct membank *a = _a, *b = _b;
- long cmp = bank_pfn_start(a) - bank_pfn_start(b);
- return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
-}
-
void __init bootmem_init(void)
{
struct meminfo *mi = &meminfo;
unsigned long min, max_low, max_high;
int node, initrd_node;
- sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL);
-
/*
* Locate which node contains the ramdisk image, if any.
*/
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a888363..815d08e 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -28,10 +28,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
#endif
-struct map_desc;
-struct meminfo;
struct pglist_data;
-void __init create_mapping(struct map_desc *md);
void __init bootmem_init(void);
void reserve_node_zero(struct pglist_data *pgdat);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 241c24a..e7113d0 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -14,6 +14,7 @@
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
+#include <linux/sort.h>
#include <asm/cputype.h>
#include <asm/mach-types.h>
@@ -603,7 +604,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
* offsets, and we take full advantage of sections and
* supersections.
*/
-void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md)
{
unsigned long phys, addr, length, end;
const struct mem_type *type;
@@ -1017,6 +1018,39 @@ static void __init kmap_init(void)
#endif
}
+static inline void map_memory_bank(struct membank *bank)
+{
+ struct map_desc map;
+
+ map.pfn = bank_pfn_start(bank);
+ map.virtual = __phys_to_virt(bank_phys_start(bank));
+ map.length = bank_phys_size(bank);
+ map.type = MT_MEMORY;
+
+ create_mapping(&map);
+}
+
+static void __init map_lowmem(void)
+{
+ struct meminfo *mi = &meminfo;
+ int i;
+
+ /* Map all the lowmem memory banks. */
+ for (i = 0; i < mi->nr_banks; i++) {
+ struct membank *bank = &mi->bank[i];
+
+ if (!bank->highmem)
+ map_memory_bank(bank);
+ }
+}
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
@@ -1025,9 +1059,12 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
build_mem_type_table();
sanity_check_meminfo();
prepare_page_table();
+ map_lowmem();
bootmem_init();
devicemaps_init(mdesc);
kmap_init();
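Moving the meminfo sort from bootmem_init() into paging_init() preserves the invariant that banks are in ascending PFN order before either map_lowmem() or bootmem_init() walks them. The sort() used is the kernel's generic heapsort from <linux/sort.h>; a self-contained sketch of the comparator contract, with the bank structure simplified:

#include <linux/sort.h>

struct bank { unsigned long start_pfn; };	/* simplified membank */

static int bank_cmp(const void *_a, const void *_b)
{
	const struct bank *a = _a, *b = _b;

	/* Usual <0 / 0 / >0 contract; explicit compares avoid overflow. */
	if (a->start_pfn < b->start_pfn)
		return -1;
	if (a->start_pfn > b->start_pfn)
		return 1;
	return 0;
}

/* Usage: sort(banks, nr_banks, sizeof(banks[0]), bank_cmp, NULL); */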