-rw-r--r--  arch/sh/boot/Makefile           |   9
-rw-r--r--  arch/sh/include/asm/addrspace.h |   4
-rw-r--r--  arch/sh/include/asm/io.h        |  27
-rw-r--r--  arch/sh/include/asm/mmu.h       |  18
-rw-r--r--  arch/sh/include/asm/page.h      |   2
-rw-r--r--  arch/sh/kernel/head_32.S        |   4
-rw-r--r--  arch/sh/kernel/setup.c          |   3
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S    |  15
-rw-r--r--  arch/sh/mm/Kconfig              |  24
-rw-r--r--  arch/sh/mm/Makefile             |   2
-rw-r--r--  arch/sh/mm/pmb.c                | 106
11 files changed, 117 insertions, 97 deletions
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index 743ce0c..1ce6362 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -46,15 +46,8 @@ $(obj)/romImage: $(obj)/romimage/vmlinux FORCE
$(obj)/romimage/vmlinux: $(obj)/zImage FORCE
$(Q)$(MAKE) $(build)=$(obj)/romimage $@
-KERNEL_MEMORY := 0x00000000
-ifeq ($(CONFIG_PMB_FIXED),y)
-KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
+KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
$$[$(CONFIG_MEMORY_START) & 0x1fffffff]')
-endif
-ifeq ($(CONFIG_29BIT),y)
-KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
- $$[$(CONFIG_MEMORY_START)]')
-endif
KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \
$$[$(CONFIG_PAGE_OFFSET) + \
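
With the PMB_FIXED and 29BIT special cases folded together, KERNEL_MEMORY is always just the low 29 bits of CONFIG_MEMORY_START. A minimal standalone C sketch of the same arithmetic (the CONFIG_MEMORY_START value here is illustrative):

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative; real boards set CONFIG_MEMORY_START via Kconfig. */
            unsigned long memory_start = 0x0c000000UL;

            /* The mask the Makefile applies: keep only the low 29 bits. */
            printf("KERNEL_MEMORY = 0x%08lx\n", memory_start & 0x1fffffffUL);
            return 0;
    }
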
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 99d6b3e..bcd7d4d 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -28,7 +28,7 @@
/* Returns the privileged segment base of a given address */
#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)
-#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
+#ifdef CONFIG_29BIT
/*
* Map an address to a certain privileged segment
*/
@@ -40,7 +40,7 @@
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
#define P4SEGADDR(a) \
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
-#endif /* 29BIT || PMB_FIXED */
+#endif /* 29BIT */
#endif /* P1SEG */
/* Check if an address can be reached in 29 bits */
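
For reference, the PnSEGADDR() macros being re-guarded here all follow one pattern: mask the address down to 29 bits, then OR in the segment base. A standalone sketch, assuming the standard SH segment bases:

    #include <stdio.h>

    #define P1SEG 0x80000000UL      /* cached identity window */
    #define P2SEG 0xa0000000UL      /* uncached identity window */

    /* Mask an address to 29 bits and place it in the given segment. */
    static unsigned long segaddr(unsigned long a, unsigned long seg)
    {
            return (a & 0x1fffffff) | seg;
    }

    int main(void)
    {
            unsigned long phys = 0x0c000000UL;

            printf("P1: 0x%08lx\n", segaddr(phys, P1SEG)); /* 0x8c000000 */
            printf("P2: 0x%08lx\n", segaddr(phys, P2SEG)); /* 0xac000000 */
            return 0;
    }
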
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 026dd65..f4314d8 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -244,18 +244,11 @@ __ioremap(unsigned long offset, unsigned long size, unsigned long flags)
}
static inline void __iomem *
-__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags)
{
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
+#ifdef CONFIG_29BIT
unsigned long last_addr = offset + size - 1;
-#endif
- void __iomem *ret;
- ret = __ioremap_trapped(offset, size);
- if (ret)
- return ret;
-
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
/*
* For P1 and P2 space this is trivial, as everything is already
* mapped. Uncached access for P1 addresses are done through P2.
@@ -274,6 +267,22 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
return (void __iomem *)P4SEGADDR(offset);
#endif
+ return NULL;
+}
+
+static inline void __iomem *
+__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
+{
+ void __iomem *ret;
+
+ ret = __ioremap_trapped(offset, size);
+ if (ret)
+ return ret;
+
+ ret = __ioremap_29bit(offset, size, flags);
+ if (ret)
+ return ret;
+
return __ioremap(offset, size, flags);
}
#else
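
The split turns __ioremap_mode() into a plain fall-through chain in which each helper declines by returning NULL. A self-contained sketch of that dispatch shape, with hypothetical stand-ins for the real back ends:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-in: trapped I/O is not modelled here. */
    static void *try_trapped(unsigned long off)
    {
            (void)off;
            return NULL;
    }

    /* Hypothetical stand-in: pretend only sub-512MiB addresses qualify. */
    static void *try_29bit(unsigned long off)
    {
            return off < 0x20000000UL ? (void *)(off | 0xa0000000UL) : NULL;
    }

    /* Hypothetical stand-in for a real page-table based remap. */
    static void *remap_dynamic(unsigned long off)
    {
            return (void *)off;
    }

    /* The fall-through chain __ioremap_mode() now reduces to. */
    static void *remap(unsigned long off)
    {
            void *ret;

            if ((ret = try_trapped(off)))
                    return ret;
            if ((ret = try_29bit(off)))
                    return ret;
            return remap_dynamic(off);
    }

    int main(void)
    {
            printf("%p\n", remap(0x0c000000UL));
            return 0;
    }
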
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index c7426ad..4b0882b 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -65,11 +65,29 @@ struct pmb_entry {
struct pmb_entry *link;
};
+#ifdef CONFIG_PMB
/* arch/sh/mm/pmb.c */
long pmb_remap(unsigned long virt, unsigned long phys,
unsigned long size, unsigned long flags);
void pmb_unmap(unsigned long addr);
int pmb_init(void);
+#else
+static inline long pmb_remap(unsigned long virt, unsigned long phys,
+ unsigned long size, unsigned long flags)
+{
+	return -EINVAL;
+}
+
+static inline void pmb_unmap(unsigned long addr)
+{
+}
+
+static inline int pmb_init(void)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_PMB */
+
#endif /* __ASSEMBLY__ */
#endif /* __MMU_H */
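
Providing inline stubs for the !CONFIG_PMB case is what lets the setup.c hunk below call pmb_init() unconditionally. The same pattern in isolation, with a hypothetical feature flag:

    #include <errno.h>
    #include <stdio.h>

    #define CONFIG_FEATURE 0        /* toggle to see both sides */

    #if CONFIG_FEATURE
    static int feature_init(void)
    {
            printf("feature up\n");
            return 0;
    }
    #else
    /* Stub keeps call sites free of #ifdefs; it simply reports "not here". */
    static inline int feature_init(void)
    {
            return -ENODEV;
    }
    #endif

    int main(void)
    {
            /* The call stays unconditional, as setup_arch() now is. */
            if (feature_init() < 0)
                    printf("feature unavailable, carrying on\n");
            return 0;
    }
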
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 81bffc0..a86c0f1 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -127,7 +127,7 @@ typedef struct page *pgtable_t;
* is not visible (it is part of the PMB mapping) and so needs to be
* added or subtracted as required.
*/
-#if defined(CONFIG_PMB_FIXED)
+#if defined(CONFIG_PMB_LEGACY)
/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PMB_OFFSET)
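
Worked through with illustrative values (PAGE_OFFSET = 0x80000000 and a legacy-PMB __MEMORY_START of 0x40000000), the __pa() arithmetic looks like this:

    #include <stdio.h>

    #define PAGE_OFFSET     0x80000000UL
    #define MEMORY_START    0x40000000UL    /* illustrative legacy-PMB value */
    #define PXSEG(a)        ((a) & 0xe0000000UL)
    #define PMB_OFFSET      (PAGE_OFFSET - PXSEG(MEMORY_START))

    int main(void)
    {
            unsigned long virt = PAGE_OFFSET + 0x1000;

            /* phys = virt - PAGE_OFFSET + PXSEG(MEMORY_START) */
            printf("virt 0x%08lx -> phys 0x%08lx\n", virt, virt - PMB_OFFSET);
            return 0;
    }
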
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 1151ecd..e5d421db 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -13,6 +13,8 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
+#include <asm/mmu.h>
+#include <cpu/mmu_context.h>
#ifdef CONFIG_CPU_SH4A
#define SYNCO() synco
@@ -33,7 +35,7 @@ ENTRY(empty_zero_page)
.long 1 /* LOADER_TYPE */
.long 0x00000000 /* INITRD_START */
.long 0x00000000 /* INITRD_SIZE */
-#if defined(CONFIG_32BIT) && defined(CONFIG_PMB_FIXED)
+#ifdef CONFIG_32BIT
.long 0x53453f00 + 32 /* "SE?" = 32 bit */
#else
.long 0x53453f00 + 29 /* "SE?" = 29 bit */
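
The "SE?" magic packs three ASCII bytes into the top of the word and the physical address width into the low byte. A quick standalone check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned long magic = 0x53453f00UL + 32; /* "SE?" tag + mode */

            printf("%c%c%c + %lu-bit mode\n",
                   (int)((magic >> 24) & 0xff),      /* 'S' */
                   (int)((magic >> 16) & 0xff),      /* 'E' */
                   (int)((magic >>  8) & 0xff),      /* '?' */
                   magic & 0xff);                    /* 32 */
            return 0;
    }
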
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 8b0e697..f79ebe3 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -455,10 +455,7 @@ void __init setup_arch(char **cmdline_p)
sh_mv.mv_setup(cmdline_p);
paging_init();
-
-#ifdef CONFIG_PMB_ENABLE
pmb_init();
-#endif
#ifdef CONFIG_SMP
plat_smp_setup();
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index a1e4ec2..9e5a587 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -14,17 +14,16 @@ OUTPUT_ARCH(sh)
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>
+#if defined(CONFIG_32BIT) && !defined(CONFIG_PMB_LEGACY)
+#define MEMORY_OFFSET 0
+#else
+#define MEMORY_OFFSET (CONFIG_MEMORY_START & 0x1fffffff)
+#endif
+
ENTRY(_start)
SECTIONS
{
-#ifdef CONFIG_PMB_FIXED
- . = CONFIG_PAGE_OFFSET + (CONFIG_MEMORY_START & 0x1fffffff) +
- CONFIG_ZERO_PAGE_OFFSET;
-#elif defined(CONFIG_32BIT)
- . = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
-#else
- . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
-#endif
+ . = CONFIG_PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
_text = .; /* Text and read-only data */
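
The consolidated expression makes the link address a single sum over MEMORY_OFFSET. A standalone comparison of the two cases, with illustrative config values:

    #include <stdio.h>

    #define PAGE_OFFSET      0x80000000UL
    #define MEMORY_START     0x0c000000UL   /* illustrative */
    #define ZERO_PAGE_OFFSET 0x00001000UL   /* illustrative */

    int main(void)
    {
            /* 32-bit with dynamic PMB: MEMORY_OFFSET folds to zero. */
            unsigned long dyn = PAGE_OFFSET + 0 + ZERO_PAGE_OFFSET;
            /* 29-bit or legacy PMB: the physical offset stays visible. */
            unsigned long leg = PAGE_OFFSET + (MEMORY_START & 0x1fffffff)
                                + ZERO_PAGE_OFFSET;

            printf("dynamic: 0x%08lx\nlegacy:  0x%08lx\n", dyn, leg);
            return 0;
    }
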
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 358c860..860cd24 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -80,30 +80,18 @@ config 32BIT
bool
default y if CPU_SH5
-config PMB_ENABLE
- bool "Support 32-bit physical addressing through PMB"
- depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
- help
- If you say Y here, physical addressing will be extended to
- 32-bits through the SH-4A PMB. If this is not set, legacy
- 29-bit physical addressing will be used.
-
-choice
- prompt "PMB handling type"
- depends on PMB_ENABLE
- default PMB_FIXED
-
config PMB
- bool "PMB"
+ bool "Support 32-bit physical addressing through PMB"
depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
+ select 32BIT
help
If you say Y here, physical addressing will be extended to
32-bits through the SH-4A PMB. If this is not set, legacy
29-bit physical addressing will be used.
-config PMB_FIXED
- bool "fixed PMB"
- depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
+config PMB_LEGACY
+ bool "Support legacy boot mappings for PMB"
+ depends on PMB
select 32BIT
help
If this option is enabled, fixed PMB mappings are inherited
@@ -111,8 +99,6 @@ config PMB_FIXED
management. This is the closest to legacy 29-bit physical mode,
and allows systems to support up to 512MiB of system memory.
-endchoice
-
config X2TLB
bool "Enable extended TLB mode"
depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 9fa11d6..edde8bd 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -33,7 +33,7 @@ obj-y += $(tlb-y)
endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_PMB_ENABLE) += pmb.o
+obj-$(CONFIG_PMB) += pmb.o
obj-$(CONFIG_NUMA) += numa.o
# Special flags for fault_64.o. This puts restrictions on the number of
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 280f6a1..8f7dbf1 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,7 +3,7 @@
*
* Privileged Space Mapping Buffer (PMB) Support.
*
- * Copyright (C) 2005, 2006, 2007 Paul Mundt
+ * Copyright (C) 2005 - 2010 Paul Mundt
*
* P1/P2 Section mapping definitions from map32.h, which was:
*
@@ -279,51 +279,12 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
} while (pmbe);
}
-#ifdef CONFIG_PMB
-int __uses_jump_to_uncached pmb_init(void)
-{
- unsigned int i;
- long size, ret;
-
- jump_to_uncached();
-
- /*
- * Insert PMB entries for the P1 and P2 areas so that, after
- * we've switched the MMU to 32-bit mode, the semantics of P1
- * and P2 are the same as in 29-bit mode, e.g.
- *
- * P1 - provides a cached window onto physical memory
- * P2 - provides an uncached window onto physical memory
- */
- size = __MEMORY_START + __MEMORY_SIZE;
-
- ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
- BUG_ON(ret != size);
-
- ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
- BUG_ON(ret != size);
-
- ctrl_outl(0, PMB_IRMCR);
-
- /* PMB.SE and UB[7] */
- ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
-
- /* Flush out the TLB */
- i = ctrl_inl(MMUCR);
- i |= MMUCR_TI;
- ctrl_outl(i, MMUCR);
-
- back_to_cached();
-
- return 0;
-}
-#else
-int __uses_jump_to_uncached pmb_init(void)
+#ifdef CONFIG_PMB_LEGACY
+static int pmb_apply_legacy_mappings(void)
{
int i;
unsigned long addr, data;
-
- jump_to_uncached();
+ unsigned int applied = 0;
for (i = 0; i < PMB_ENTRY_MAX; i++) {
struct pmb_entry *pmbe;
@@ -357,13 +318,69 @@ int __uses_jump_to_uncached pmb_init(void)
pmbe = pmb_alloc(vpn, ppn, flags, i);
WARN_ON(IS_ERR(pmbe));
+
+ applied++;
+ }
+
+ return (applied == 0);
+}
+#else
+static inline int pmb_apply_legacy_mappings(void)
+{
+ return 1;
+}
+#endif
+
+int __uses_jump_to_uncached pmb_init(void)
+{
+ unsigned int i;
+ unsigned long size, ret;
+
+ jump_to_uncached();
+
+ /*
+ * Attempt to apply the legacy boot mappings if configured. If
+ * this is successful then we simply carry on with those and
+ * don't bother establishing additional memory mappings. Dynamic
+ * device mappings through pmb_remap() can still be bolted on
+ * after this.
+ */
+ ret = pmb_apply_legacy_mappings();
+ if (ret == 0) {
+ back_to_cached();
+ return 0;
}
+ /*
+ * Insert PMB entries for the P1 and P2 areas so that, after
+ * we've switched the MMU to 32-bit mode, the semantics of P1
+ * and P2 are the same as in 29-bit mode, e.g.
+ *
+ * P1 - provides a cached window onto physical memory
+ * P2 - provides an uncached window onto physical memory
+ */
+ size = (unsigned long)__MEMORY_START + __MEMORY_SIZE;
+
+ ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
+ BUG_ON(ret != size);
+
+ ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
+ BUG_ON(ret != size);
+
+ ctrl_outl(0, PMB_IRMCR);
+
+ /* PMB.SE and UB[7] */
+ ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
+
+ /* Flush out the TLB */
+ i = ctrl_inl(MMUCR);
+ i |= MMUCR_TI;
+ ctrl_outl(i, MMUCR);
+
back_to_cached();
return 0;
}
-#endif /* CONFIG_PMB */
static int pmb_seq_show(struct seq_file *file, void *iter)
{
@@ -462,6 +479,5 @@ static int __init pmb_sysdev_init(void)
{
return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
-
subsys_initcall(pmb_sysdev_init);
#endif
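
The reworked pmb_init() tries the loader-provided mappings first and only establishes fresh P1/P2 windows when nothing was inherited. A stripped-down, hypothetical sketch of that control flow:

    #include <stdio.h>

    /* Hypothetical stand-in: nonzero means nothing usable was inherited. */
    static int apply_legacy_mappings(void)
    {
            return 1;
    }

    static void establish_fresh_mappings(void)
    {
            printf("mapping P1 (cached) and P2 (uncached) windows\n");
    }

    int main(void)
    {
            /* Prefer the boot loader's mappings; build our own otherwise. */
            if (apply_legacy_mappings() == 0) {
                    printf("reusing legacy boot mappings\n");
                    return 0;
            }
            establish_fresh_mappings();
            return 0;
    }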