 arch/powerpc/kernel/prom.c            | 15 +++++++++++++++
 arch/powerpc/mm/hash_utils_64.c       |  1 +
 arch/powerpc/mm/slb.c                 |  3 +++
 arch/powerpc/mm/slb_low.S             |  5 +++--
 arch/powerpc/platforms/pasemi/setup.c |  3 ++-
 arch/powerpc/xmon/xmon.c              |  2 +-
 include/asm-powerpc/mmu-hash64.h      |  1 +
 include/asm-powerpc/reg.h             |  6 ------
 8 files changed, 26 insertions(+), 10 deletions(-)
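Taken together, the change replaces the compile-time SLB_NUM_ENTRIES constant with a runtime value: boot code reads the "ibm,slb-size" property from the cpu node into a new global, mmu_slb_size (default 64), the SLB miss handler's wrap-around comparison is soft-patched with that value at boot, and the pasemi machine-check dump and xmon switch to the variable.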
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index acc0d24..6c2d883 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -583,6 +583,20 @@ static void __init check_cpu_pa_features(unsigned long node)
 		      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
 }
 
+#ifdef CONFIG_PPC64
+static void __init check_cpu_slb_size(unsigned long node)
+{
+	u32 *slb_size_ptr;
+
+	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
+	if (slb_size_ptr != NULL) {
+		mmu_slb_size = *slb_size_ptr;
+	}
+}
+#else
+#define check_cpu_slb_size(node) do { } while(0)
+#endif
+
 static struct feature_property {
 	const char *name;
 	u32 min_value;
@@ -713,6 +727,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 	check_cpu_feature_properties(node);
 	check_cpu_pa_features(node);
+	check_cpu_slb_size(node);
 
 #ifdef CONFIG_PPC_PSERIES
 	if (nthreads > 1)
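The new check leaves the built-in default alone unless the property is present. Below is a minimal userspace sketch of that behaviour, not kernel code: the of_get_flat_dt_prop() lookup is replaced by a plain pointer argument, and the 32-entry value is hypothetical. Note the property cell is a u32 while mmu_slb_size is a u16, so the assignment truncates, which is harmless for any realistic SLB size.

/* sketch: default-or-override semantics of check_cpu_slb_size() */
#include <stdint.h>
#include <stdio.h>

static uint16_t mmu_slb_size = 64;		/* default, as in hash_utils_64.c */

static void check_cpu_slb_size(const uint32_t *slb_size_ptr)
{
	if (slb_size_ptr != NULL)
		mmu_slb_size = *slb_size_ptr;	/* u32 -> u16 truncation */
}

int main(void)
{
	uint32_t prop = 32;			/* hypothetical ibm,slb-size cell */

	check_cpu_slb_size(NULL);		/* property absent */
	printf("default: %u\n", (unsigned)mmu_slb_size);	/* 64 */
	check_cpu_slb_size(&prop);		/* property present */
	printf("from DT: %u\n", (unsigned)mmu_slb_size);	/* 32 */
	return 0;
}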
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f09730b..cbbd8b0 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -96,6 +96,7 @@ int mmu_vmalloc_psize = MMU_PAGE_4K;
 int mmu_io_psize = MMU_PAGE_4K;
 int mmu_kernel_ssize = MMU_SEGSIZE_256M;
 int mmu_highuser_ssize = MMU_SEGSIZE_256M;
+u16 mmu_slb_size = 64;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 27922df..3cf0802 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -256,6 +256,7 @@ void slb_initialize(void)
 	static int slb_encoding_inited;
 	extern unsigned int *slb_miss_kernel_load_linear;
 	extern unsigned int *slb_miss_kernel_load_io;
+	extern unsigned int *slb_compare_rr_to_size;
 
 	/* Prepare our SLB miss handler based on our page size */
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
@@ -269,6 +270,8 @@ void slb_initialize(void)
 			   SLB_VSID_KERNEL | linear_llp);
 	patch_slb_encoding(slb_miss_kernel_load_io,
 			   SLB_VSID_KERNEL | io_llp);
+	patch_slb_encoding(slb_compare_rr_to_size,
+			   mmu_slb_size);
 
 	DBG("SLB: linear LLP = %04x\n", linear_llp);
 	DBG("SLB: io LLP = %04x\n", io_llp);
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1328a81..657f6b3 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -227,8 +227,9 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 7:	ld	r10,PACASTABRR(r13)
 	addi	r10,r10,1
-	/* use a cpu feature mask if we ever change our slb size */
-	cmpldi	r10,SLB_NUM_ENTRIES
+	/* This gets soft patched on boot. */
+_GLOBAL(slb_compare_rr_to_size)
+	cmpldi	r10,0
 	blt+	4f
 	li	r10,SLB_NUM_BOLTED
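The patched comparison drives the SLB round-robin: PACASTABRR holds the last victim slot, the handler increments it, and when it reaches the (now runtime) SLB size it wraps back past the bolted entries. Roughly, in C; SLB_NUM_BOLTED was 3 in kernels of this era:

/* sketch: the slot selection from slb_low.S above, in C */
#include <stdio.h>

#define SLB_NUM_BOLTED	3	/* bolted entries are never evicted */

static unsigned int mmu_slb_size = 64;		/* soft patched at boot */
static unsigned int stab_rr = SLB_NUM_BOLTED;	/* PACASTABRR equivalent */

static unsigned int next_slb_slot(void)
{
	unsigned int slot = stab_rr + 1;	/* addi r10,r10,1 */

	if (slot >= mmu_slb_size)		/* the patched cmpldi + blt */
		slot = SLB_NUM_BOLTED;		/* li r10,SLB_NUM_BOLTED */
	stab_rr = slot;
	return slot;
}

int main(void)
{
	mmu_slb_size = 5;	/* tiny size so the wrap is visible */
	for (int i = 0; i < 6; i++)
		printf("victim slot %u\n", next_slb_slot());
	/* prints 4, 3, 4, 3, 4, 3 */
	return 0;
}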
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 5748194..6d7d068 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -36,6 +36,7 @@
 #include <asm/mpic.h>
 #include <asm/smp.h>
 #include <asm/time.h>
+#include <asm/mmu.h>
 
 #include <pcmcia/ss.h>
 #include <pcmcia/cistpl.h>
@@ -302,7 +303,7 @@ static int pas_machine_check_handler(struct pt_regs *regs)
 	int i;
 
 	printk(KERN_ERR "slb contents:\n");
-	for (i = 0; i < SLB_NUM_ENTRIES; i++) {
+	for (i = 0; i < mmu_slb_size; i++) {
 		asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
 		asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
 		printk(KERN_ERR "%02d %016lx %016lx\n", i, e, v);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 381d467..c60d123 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2543,7 +2543,7 @@ static void dump_slb(void)
printf("SLB contents of cpu %x\n", smp_processor_id());
- for (i = 0; i < SLB_NUM_ENTRIES; i++) {
+ for (i = 0; i < mmu_slb_size; i++) {
asm volatile("slbmfee %0,%1" : "=r" (tmp) : "r" (i));
printf("%02d %016lx ", i, tmp);
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 82328de..12e5e77 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -180,6 +180,7 @@ extern int mmu_vmalloc_psize;
 extern int mmu_io_psize;
 extern int mmu_kernel_ssize;
 extern int mmu_highuser_ssize;
+extern u16 mmu_slb_size;
 
 /*
  * If the processor supports 64k normal pages but not 64k cache
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index e775ff1..1f68504 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -691,12 +691,6 @@
 #define PV_BE		0x0070
 #define PV_PA6T		0x0090
 
-/*
- * Number of entries in the SLB. If this ever changes we should handle
- * it with a use a cpu feature fixup.
- */
-#define SLB_NUM_ENTRIES 64
-
 /* Macros for setting and retrieving special purpose registers */
 #ifndef __ASSEMBLY__
 #define mfmsr()	({unsigned long rval; \