Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/Makefile              7
-rw-r--r--  arch/powerpc/mm/hash_native_64.c     13
-rw-r--r--  arch/powerpc/mm/init_64.c             2
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c 19
-rw-r--r--  arch/powerpc/mm/numa.c                2
5 files changed, 31 insertions, 12 deletions
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index b746f4c..c4bcf07 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -11,10 +11,11 @@ obj-y := fault.o mem.o pgtable.o gup.o \
pgtable_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
tlb_nohash_low.o
-hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o
-obj-$(CONFIG_PPC64) += hash_utils_64.o \
+obj-$(CONFIG_PPC64) += mmap_64.o
+hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o
+obj-$(CONFIG_PPC_STD_MMU_64) += hash_utils_64.o \
slb_low.o slb.o stab.o \
- mmap_64.o $(hash-y)
+ mmap_64.o $(hash64-y)
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o
obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \
tlb_hash$(CONFIG_WORD_SIZE).o \
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 34e5c0b..056d23a 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -27,6 +27,7 @@
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
+#include <asm/ppc-opcode.h>
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
@@ -49,14 +50,21 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
case MMU_PAGE_4K:
va &= ~0xffful;
va |= ssize << 8;
- asm volatile("tlbie %0,0" : : "r" (va) : "memory");
+ asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0),
+ %2)
+ : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
+ : "memory");
break;
default:
penc = mmu_psize_defs[psize].penc;
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
- asm volatile("tlbie %0,1" : : "r" (va) : "memory");
+ va |= 1; /* L */
+ asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0),
+ %2)
+ : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
+ : "memory");
break;
}
}
@@ -80,6 +88,7 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
+ va |= 1; /* L */
asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
: : "r"(va) : "memory");
break;
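
[Note] The two __tlbie hunks above select between the old and the new (architecture 2.06) encodings of tlbie at boot: ASM_MMU_FTR_IFCLR emits the old "tlbie RB,L" form when MMU_FTR_TLBIE_206 is clear and the PPC_TLBIE(RS,RB) raw encoding when it is set, and the kernel's MMU feature-fixup pass keeps whichever matches the running CPU. PPC_TLBIE comes from the newly included asm/ppc-opcode.h, which emits the instruction as a raw .long so the build does not depend on assembler support for the new form. A minimal sketch of the underlying idea, assuming a hypothetical cur_cpu_mmu_features word filled in during early boot — the real kernel patches the instruction stream once rather than testing the flag on every invalidation:

	#include <stdint.h>

	#define MMU_FTR_TLBIE_206  0x00000001u   /* bit value assumed for illustration */

	static uint32_t cur_cpu_mmu_features;    /* set from the CPU table at boot */

	static inline void tlb_invalidate(unsigned long va)
	{
		if (cur_cpu_mmu_features & MMU_FTR_TLBIE_206) {
			/* 2.06 form: tlbie RB,RS -- RS supplies the LPID, hence
			 * the extra register operand (0 in the patch above). */
			/* asm volatile("tlbie %0,%1" : : "r" (va), "r" (0UL) : "memory"); */
		} else {
			/* pre-2.06 form: the L field is an immediate in the opcode */
			/* asm volatile("tlbie %0,0" : : "r" (va) : "memory"); */
		}
	}
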
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 3e6a654..68a821a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -66,6 +66,7 @@
#include "mmu_decl.h"
+#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
@@ -73,6 +74,7 @@
#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 030d000..8343986 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -46,7 +46,7 @@ static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
-static spinlock_t context_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(context_lock);
#define CTX_MAP_SIZE \
(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -73,7 +73,6 @@ static unsigned int steal_context_smp(unsigned int id)
struct mm_struct *mm;
unsigned int cpu, max;
- again:
max = last_context - first_context;
/* Attempt to free next_context first and then loop until we manage */
@@ -108,7 +107,9 @@ static unsigned int steal_context_smp(unsigned int id)
spin_unlock(&context_lock);
cpu_relax();
spin_lock(&context_lock);
- goto again;
+
+ /* This will cause the caller to try again */
+ return MMU_NO_CONTEXT;
}
#endif /* CONFIG_SMP */
@@ -194,6 +195,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
WARN_ON(prev->context.active < 1);
prev->context.active--;
}
+
+ again:
#endif /* CONFIG_SMP */
/* If we already have a valid assigned context, skip all that */
@@ -212,7 +215,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
id = steal_context_smp(id);
- goto stolen;
+ if (id == MMU_NO_CONTEXT)
+ goto again;
}
#endif /* CONFIG_SMP */
id = steal_context_up(id);
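
[Note] These two hunks replace steal_context_smp()'s internal "goto again" with a sentinel return. Because the function drops context_lock to spin, its view of the context map may be stale by the time it re-acquires the lock, so the caller now restarts the whole allocation from the new "again:" label instead. A compilable toy sketch of the pattern, with simplified names and state (the lock dropping is simulated by another CPU freeing a context):

	#include <stdio.h>

	#define MMU_NO_CONTEXT ((unsigned int)-1)   /* sentinel value assumed */

	static unsigned int nr_free_contexts;       /* stand-in for the shared state */

	/* May have "dropped the lock" while waiting; if so, report failure
	 * rather than retrying with a possibly stale view of the map. */
	static unsigned int steal_context(unsigned int id)
	{
		if (nr_free_contexts == 0) {
			nr_free_contexts = 1;       /* pretend another CPU freed one */
			return MMU_NO_CONTEXT;      /* caller must start over */
		}
		nr_free_contexts--;
		return id;
	}

	static unsigned int get_context(void)
	{
		unsigned int id;
	again:
		id = steal_context(7);
		if (id == MMU_NO_CONTEXT)
			goto again;                 /* re-run the full allocation path */
		return id;
	}

	int main(void)
	{
		printf("got context %u\n", get_context());
		return 0;
	}
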
@@ -272,6 +276,7 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
*/
void destroy_context(struct mm_struct *mm)
{
+ unsigned long flags;
unsigned int id;
if (mm->context.id == MMU_NO_CONTEXT)
@@ -279,18 +284,18 @@ void destroy_context(struct mm_struct *mm)
WARN_ON(mm->context.active != 0);
- spin_lock(&context_lock);
+ spin_lock_irqsave(&context_lock, flags);
id = mm->context.id;
if (id != MMU_NO_CONTEXT) {
__clear_bit(id, context_map);
mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
mm->context.active = 0;
- context_mm[id] = NULL;
#endif
+ context_mm[id] = NULL;
nr_free_contexts++;
}
- spin_unlock(&context_lock);
+ spin_unlock_irqrestore(&context_lock, flags);
}
#ifdef CONFIG_SMP
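
[Note] The destroy_context() hunks make two locking changes: the SPIN_LOCK_UNLOCKED static initializer (which predates, and does not work with, lockdep) becomes DEFINE_SPINLOCK, and the lock is now taken with interrupts saved and disabled, keeping it safe against any taker of context_lock that runs with interrupts off. They also move the context_mm[id] = NULL cleanup out of the DEBUG_MAP_CONSISTENCY-only block so the stale pointer is always cleared. A kernel-context sketch of the resulting idiom (a fragment, not a standalone program; only real <linux/spinlock.h> APIs are used):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(context_lock);   /* replaces SPIN_LOCK_UNLOCKED */

	static void release_context(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&context_lock, flags);    /* disables local irqs */
		/* ... clear the context-map bit, NULL out context_mm[id],
		 *     bump nr_free_contexts ... */
		spin_unlock_irqrestore(&context_lock, flags);
	}
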
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9047145..b037d95 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -981,6 +981,8 @@ void __init do_init_bootmem(void)
mark_reserved_regions_for_nid(nid);
sparse_memory_present_with_active_regions(nid);
}
+
+ init_bootmem_done = 1;
}
void __init paging_init(void)
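
[Note] The numa.c hunk sets init_bootmem_done once the NUMA bootmem setup has finished, matching the flag the non-NUMA do_init_bootmem() path sets; early-allocation code elsewhere in arch/powerpc can consult it to tell whether the bootmem allocator is usable yet. A sketch of that consumer pattern — the helper name and the lmb_alloc() fallback are assumptions for illustration, only the flag itself comes from the patch:

	#include <linux/bootmem.h>

	extern int init_bootmem_done;   /* set at the end of do_init_bootmem() */

	/* Hypothetical early-allocation helper: before bootmem is initialised,
	 * callers would have to use the LMB (early memory) allocator instead. */
	static void *early_alloc_sketch(unsigned long size)
	{
		if (init_bootmem_done)
			return alloc_bootmem(size);   /* bootmem is up and usable */
		return NULL;                          /* would fall back to lmb_alloc() */
	}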