Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r-- | arch/powerpc/mm/40x_mmu.c (renamed from arch/powerpc/mm/4xx_mmu.c) |   4
-rw-r--r-- | arch/powerpc/mm/Makefile                                          |  15
-rw-r--r-- | arch/powerpc/mm/fsl_booke_mmu.c                                   |   2
-rw-r--r-- | arch/powerpc/mm/hash_low_64.S                                     |  73
-rw-r--r-- | arch/powerpc/mm/hash_native_64.c                                  |  92
-rw-r--r-- | arch/powerpc/mm/hash_utils_64.c                                   | 121
-rw-r--r-- | arch/powerpc/mm/hugetlbpage.c                                     |  16
-rw-r--r-- | arch/powerpc/mm/init_32.c                                         |  41
-rw-r--r-- | arch/powerpc/mm/init_64.c                                         |   2
-rw-r--r-- | arch/powerpc/mm/mem.c                                             |   1
-rw-r--r-- | arch/powerpc/mm/mmu_context_64.c                                  |  11
-rw-r--r-- | arch/powerpc/mm/pgtable_64.c                                      |   6
-rw-r--r-- | arch/powerpc/mm/slb.c                                             |  73
-rw-r--r-- | arch/powerpc/mm/slb_low.S                                         |  37
-rw-r--r-- | arch/powerpc/mm/slice.c                                           |   1
-rw-r--r-- | arch/powerpc/mm/stab.c                                            |   6
-rw-r--r-- | arch/powerpc/mm/tlb_64.c                                          |  20
17 files changed, 355 insertions, 166 deletions
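
Most hunks below thread a new segment-size argument ("ssize": 0 for the classic 256MB segments, 1 for the new 1TB segments) through every hash-table and TLB path: virtual-address construction, hashing, HPTE encode/decode, the tlbie/slbie encodings and the SLB bolting code. As a reading aid, here is a simplified C sketch of the two helpers the series relies on, hpt_va() and hpt_hash(); the real definitions land in include/asm-powerpc/mmu-hash64.h, outside this diffstat, and the constants mirror the assembly in hash_low_64.S below:

	#define MMU_SEGSIZE_256M	0
	#define MMU_SEGSIZE_1T		1

	/* Build the full virtual address from the EA and the VSID */
	static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
					   int ssize)
	{
		if (ssize == MMU_SEGSIZE_256M)
			return (vsid << 28) | (ea & 0x0fffffffUL);
		return (vsid << 40) | (ea & 0xffffffffffUL);	/* 1T segment */
	}

	/* Primary hash; "shift" is the page-size shift (12 for 4K pages) */
	static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
					     int ssize)
	{
		unsigned long hash, vsid;

		if (ssize == MMU_SEGSIZE_256M) {
			hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
		} else {
			vsid = va >> 40;
			hash = vsid ^ (vsid << 25) ^
				((va & 0xffffffffffUL) >> shift);
		}
		return hash & 0x7fffffffffUL;
	}

The 1T branch is exactly what the new "3:" code paths in __hash_page_4K/__hash_page_64K compute with sldi/rldic/xor.
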
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 7ff2609..e067df8 100644
--- a/arch/powerpc/mm/4xx_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -108,7 +108,7 @@ unsigned long __init mmu_mapin_ram(void)
 			pmd_t *pmdp;
 			unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
-			pmdp = pmd_offset(pgd_offset_k(v), v);
+			pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
 			pmd_val(*pmdp++) = val;
 			pmd_val(*pmdp++) = val;
 			pmd_val(*pmdp++) = val;
@@ -123,7 +123,7 @@ unsigned long __init mmu_mapin_ram(void)
 			pmd_t *pmdp;
 			unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
-			pmdp = pmd_offset(pgd_offset_k(v), v);
+			pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
 			pmd_val(*pmdp) = val;
 
 			v += LARGE_PAGE_SIZE_4M;
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 7e4d27a..20629ae 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -6,14 +6,17 @@ ifeq ($(CONFIG_PPC64),y)
 EXTRA_CFLAGS	+= -mno-minimal-toc
 endif
 
-obj-y				:= fault.o mem.o lmb.o
-obj-$(CONFIG_PPC32)		+= init_32.o pgtable_32.o mmu_context_32.o
+obj-y				:= fault.o mem.o lmb.o \
+				   init_$(CONFIG_WORD_SIZE).o \
+				   pgtable_$(CONFIG_WORD_SIZE).o \
+				   mmu_context_$(CONFIG_WORD_SIZE).o
 hash-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
-obj-$(CONFIG_PPC64)		+= init_64.o pgtable_64.o mmu_context_64.o \
-				   hash_utils_64.o hash_low_64.o tlb_64.o \
+obj-$(CONFIG_PPC64)		+= hash_utils_64.o \
 				   slb_low.o slb.o stab.o mmap.o $(hash-y)
-obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o hash_low_32.o tlb_32.o
-obj-$(CONFIG_40x)		+= 4xx_mmu.o
+obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o
+obj-$(CONFIG_PPC_STD_MMU)	+= hash_low_$(CONFIG_WORD_SIZE).o \
+				   tlb_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_40x)		+= 40x_mmu.o
 obj-$(CONFIG_44x)		+= 44x_mmu.o
 obj-$(CONFIG_FSL_BOOKE)		+= fsl_booke_mmu.o
 obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index afab247..17139da 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -59,6 +59,7 @@ unsigned int num_tlbcam_entries;
 static unsigned long __cam0, __cam1, __cam2;
 extern unsigned long total_lowmem;
 extern unsigned long __max_low_memory;
+extern unsigned long __initial_memory_limit;
 #define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE
 
 #define NUM_TLBCAMS	(16)
@@ -232,4 +233,5 @@ adjust_total_lowmem(void)
 			__cam0 >> 20, __cam1 >> 20, __cam2 >> 20,
 			(total_lowmem - __cam0 - __cam1 - __cam2) >> 20);
 	__max_low_memory = max_low_mem = __cam0 + __cam1 + __cam2;
+	__initial_memory_limit = __max_low_memory;
 }
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 35eabfb..ad253b9 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -54,7 +54,7 @@
 /*
  * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
- *		 pte_t *ptep, unsigned long trap, int local)
+ *		 pte_t *ptep, unsigned long trap, int local, int ssize)
  *
  * Adds a 4K page to the hash table in a segment of 4K pages only
  */
@@ -66,6 +66,7 @@ _GLOBAL(__hash_page_4K)
 	/* Save all params that we need after a function call */
 	std	r6,STK_PARM(r6)(r1)
 	std	r8,STK_PARM(r8)(r1)
+	std	r9,STK_PARM(r9)(r1)
 
 	/* Add _PAGE_PRESENT to access */
 	ori	r4,r4,_PAGE_PRESENT
@@ -117,6 +118,10 @@ _GLOBAL(__hash_page_4K)
 	 * r4 (access) is re-useable, we use it for the new HPTE flags
 	 */
 
+BEGIN_FTR_SECTION
+	cmpdi	r9,0			/* check segment size */
+	bne	3f
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28
 	rldicl	r3,r3,0,36
@@ -126,9 +131,20 @@ _GLOBAL(__hash_page_4K)
 	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
 	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
 	xor	r28,r5,r0
+	b	4f
+
+3:	/* Calc VA and hash in r29 and r28 for 1T segment */
+	sldi	r29,r5,40		/* vsid << 40 */
+	clrldi	r3,r3,24		/* ea & 0xffffffffff */
+	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
+	clrldi	r5,r5,40		/* vsid & 0xffffff */
+	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
+	xor	r28,r28,r5
+	or	r29,r3,r29		/* VA */
+	xor	r28,r28,r0		/* hash */
 
 	/* Convert linux PTE bits into HW equivalents */
-	andi.	r3,r30,0x1fe		/* Get basic set of flags */
+4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
 	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
 	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
 	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
@@ -183,6 +199,7 @@ htab_insert_pte:
 	mr	r4,r29			/* Retreive va */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert1)
 	bl	.			/* Patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -205,6 +222,7 @@ _GLOBAL(htab_call_hpte_insert1)
 	mr	r4,r29			/* Retreive va */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert2)
 	bl	.			/* Patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -273,7 +291,8 @@ htab_modify_pte:
 	/* Call ppc_md.hpte_updatepp */
 	mr	r5,r29			/* va */
 	li	r6,MMU_PAGE_4K		/* page size */
-	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
+	ld	r7,STK_PARM(r9)(r1)	/* segment size */
+	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
 _GLOBAL(htab_call_hpte_updatepp)
 	bl	.			/* Patched by htab_finish_init() */
@@ -325,6 +344,7 @@ _GLOBAL(__hash_page_4K)
 	/* Save all params that we need after a function call */
 	std	r6,STK_PARM(r6)(r1)
 	std	r8,STK_PARM(r8)(r1)
+	std	r9,STK_PARM(r9)(r1)
 
 	/* Add _PAGE_PRESENT to access */
 	ori	r4,r4,_PAGE_PRESENT
@@ -383,18 +403,33 @@ _GLOBAL(__hash_page_4K)
 	/* Load the hidx index */
 	rldicl	r25,r3,64-12,60
 
+BEGIN_FTR_SECTION
+	cmpdi	r9,0			/* check segment size */
+	bne	3f
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28		/* r29 = (vsid << 28) */
 	rldicl	r3,r3,0,36		/* r3 = (ea & 0x0fffffff) */
-	or	r29,r3,r29		/* r29 = va
+	or	r29,r3,r29		/* r29 = va */
 
 	/* Calculate hash value for primary slot and store it in r28 */
 	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
 	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
 	xor	r28,r5,r0
+	b	4f
+
+3:	/* Calc VA and hash in r29 and r28 for 1T segment */
+	sldi	r29,r5,40		/* vsid << 40 */
+	clrldi	r3,r3,24		/* ea & 0xffffffffff */
+	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
+	clrldi	r5,r5,40		/* vsid & 0xffffff */
+	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
+	xor	r28,r28,r5
+	or	r29,r3,r29		/* VA */
+	xor	r28,r28,r0		/* hash */
 
 	/* Convert linux PTE bits into HW equivalents */
-	andi.	r3,r30,0x1fe		/* Get basic set of flags */
+4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
 	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
 	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
 	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
@@ -462,6 +497,7 @@ htab_special_pfn:
 	mr	r4,r29			/* Retreive va */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert1)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -488,6 +524,7 @@ _GLOBAL(htab_call_hpte_insert1)
 	mr	r4,r29			/* Retreive va */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(htab_call_hpte_insert2)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -586,7 +623,8 @@ htab_modify_pte:
 	/* Call ppc_md.hpte_updatepp */
 	mr	r5,r29			/* va */
 	li	r6,MMU_PAGE_4K		/* page size */
-	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
+	ld	r7,STK_PARM(r9)(r1)	/* segment size */
+	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
 _GLOBAL(htab_call_hpte_updatepp)
 	bl	.			/* patched by htab_finish_init() */
@@ -634,6 +672,7 @@ _GLOBAL(__hash_page_64K)
 	/* Save all params that we need after a function call */
 	std	r6,STK_PARM(r6)(r1)
 	std	r8,STK_PARM(r8)(r1)
+	std	r9,STK_PARM(r9)(r1)
 
 	/* Add _PAGE_PRESENT to access */
 	ori	r4,r4,_PAGE_PRESENT
@@ -690,6 +729,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 	 * r4 (access) is re-useable, we use it for the new HPTE flags
 	 */
 
+BEGIN_FTR_SECTION
+	cmpdi	r9,0			/* check segment size */
+	bne	3f
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28
 	rldicl	r3,r3,0,36
@@ -699,9 +742,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
 	rldicl	r0,r3,64-16,52		/* (ea >> 16) & 0xfff */
 	xor	r28,r5,r0
+	b	4f
+
+3:	/* Calc VA and hash in r29 and r28 for 1T segment */
+	sldi	r29,r5,40		/* vsid << 40 */
+	clrldi	r3,r3,24		/* ea & 0xffffffffff */
+	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
+	clrldi	r5,r5,40		/* vsid & 0xffffff */
+	rldicl	r0,r3,64-16,40		/* (ea >> 16) & 0xffffff */
+	xor	r28,r28,r5
+	or	r29,r3,r29		/* VA */
+	xor	r28,r28,r0		/* hash */
 
 	/* Convert linux PTE bits into HW equivalents */
-	andi.	r3,r30,0x1fe		/* Get basic set of flags */
+4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
 	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
 	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
 	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
@@ -756,6 +810,7 @@ ht64_insert_pte:
 	mr	r4,r29			/* Retreive va */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_64K
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(ht64_call_hpte_insert1)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -778,6 +833,7 @@ _GLOBAL(ht64_call_hpte_insert1)
 	mr	r4,r29			/* Retreive va */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_64K
+	ld	r9,STK_PARM(r9)(r1)	/* segment size */
 _GLOBAL(ht64_call_hpte_insert2)
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
@@ -846,7 +902,8 @@ ht64_modify_pte:
 	/* Call ppc_md.hpte_updatepp */
 	mr	r5,r29			/* va */
 	li	r6,MMU_PAGE_64K
-	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
+	ld	r7,STK_PARM(r9)(r1)	/* segment size */
+	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
 _GLOBAL(ht64_call_hpte_updatepp)
 	bl	.			/* patched by htab_finish_init() */
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 6ba9b47..34e5c0b 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -38,7 +38,7 @@
 
 static DEFINE_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbie(unsigned long va, unsigned int psize)
+static inline void __tlbie(unsigned long va, int psize, int ssize)
 {
 	unsigned int penc;
 
@@ -48,18 +48,20 @@ static inline void __tlbie(unsigned long va, unsigned int psize)
 	switch (psize) {
 	case MMU_PAGE_4K:
 		va &= ~0xffful;
+		va |= ssize << 8;
 		asm volatile("tlbie %0,0" : : "r" (va) : "memory");
 		break;
 	default:
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
+		va |= ssize << 8;
 		asm volatile("tlbie %0,1" : : "r" (va) : "memory");
 		break;
 	}
 }
 
-static inline void __tlbiel(unsigned long va, unsigned int psize)
+static inline void __tlbiel(unsigned long va, int psize, int ssize)
 {
 	unsigned int penc;
 
@@ -69,6 +71,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 	switch (psize) {
 	case MMU_PAGE_4K:
 		va &= ~0xffful;
+		va |= ssize << 8;
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
 		break;
@@ -76,6 +79,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
+		va |= ssize << 8;
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
 		break;
@@ -83,7 +87,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 
 }
 
-static inline void tlbie(unsigned long va, int psize, int local)
+static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 {
 	unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL);
 	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
@@ -94,10 +98,10 @@ static inline void tlbie(unsigned long va, int psize, int local)
 		spin_lock(&native_tlbie_lock);
 	asm volatile("ptesync": : :"memory");
 	if (use_local) {
-		__tlbiel(va, psize);
+		__tlbiel(va, psize, ssize);
 		asm volatile("ptesync": : :"memory");
 	} else {
-		__tlbie(va, psize);
+		__tlbie(va, psize, ssize);
 		asm volatile("eieio; tlbsync; ptesync": : :"memory");
 	}
 	if (lock_tlbie && !use_local)
@@ -126,7 +130,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
 
 static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 			unsigned long pa, unsigned long rflags,
-			unsigned long vflags, int psize)
+			unsigned long vflags, int psize, int ssize)
 {
 	struct hash_pte *hptep = htab_address + hpte_group;
 	unsigned long hpte_v, hpte_r;
@@ -153,7 +157,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 	if (i == HPTES_PER_GROUP)
 		return -1;
 
-	hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
+	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
 	hpte_r = hpte_encode_r(pa, psize) | rflags;
 
 	if (!(vflags & HPTE_V_BOLTED)) {
@@ -215,13 +219,14 @@ static long native_hpte_remove(unsigned long hpte_group)
 }
 
 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
-				 unsigned long va, int psize, int local)
+				 unsigned long va, int psize, int ssize,
+				 int local)
 {
 	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v, want_v;
 	int ret = 0;
 
-	want_v = hpte_encode_v(va, psize);
+	want_v = hpte_encode_v(va, psize, ssize);
 
 	DBG_LOW("    update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
		va, want_v & HPTE_V_AVPN, slot, newpp);
@@ -243,39 +248,32 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	native_unlock_hpte(hptep);
 
 	/* Ensure it is out of the tlb too. */
-	tlbie(va, psize, local);
+	tlbie(va, psize, ssize, local);
 
 	return ret;
 }
 
-static long native_hpte_find(unsigned long va, int psize)
+static long native_hpte_find(unsigned long va, int psize, int ssize)
 {
 	struct hash_pte *hptep;
 	unsigned long hash;
-	unsigned long i, j;
+	unsigned long i;
 	long slot;
 	unsigned long want_v, hpte_v;
 
-	hash = hpt_hash(va, mmu_psize_defs[psize].shift);
-	want_v = hpte_encode_v(va, psize);
+	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
+	want_v = hpte_encode_v(va, psize, ssize);
 
-	for (j = 0; j < 2; j++) {
-		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-		for (i = 0; i < HPTES_PER_GROUP; i++) {
-			hptep = htab_address + slot;
-			hpte_v = hptep->v;
+	/* Bolted mappings are only ever in the primary group */
+	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+	for (i = 0; i < HPTES_PER_GROUP; i++) {
+		hptep = htab_address + slot;
+		hpte_v = hptep->v;
 
-			if (HPTE_V_COMPARE(hpte_v, want_v)
-			    && (hpte_v & HPTE_V_VALID)
-			    && ( !!(hpte_v & HPTE_V_SECONDARY) == j)) {
-				/* HPTE matches */
-				if (j)
-					slot = -slot;
-				return slot;
-			}
-			++slot;
-		}
-		hash = ~hash;
+		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
+			/* HPTE matches */
+			return slot;
+		++slot;
 	}
 
 	return -1;
@@ -289,16 +287,16 @@ static long native_hpte_find(unsigned long va, int psize)
  *	No need to lock here because we should be the only user.
  */
 static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
-				       int psize)
+				       int psize, int ssize)
 {
 	unsigned long vsid, va;
 	long slot;
 	struct hash_pte *hptep;
 
-	vsid = get_kernel_vsid(ea);
-	va = (vsid << 28) | (ea & 0x0fffffff);
+	vsid = get_kernel_vsid(ea, ssize);
+	va = hpt_va(ea, vsid, ssize);
 
-	slot = native_hpte_find(va, psize);
+	slot = native_hpte_find(va, psize, ssize);
 	if (slot == -1)
 		panic("could not find page to bolt\n");
 	hptep = htab_address + slot;
@@ -308,11 +306,11 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 		(newpp & (HPTE_R_PP | HPTE_R_N));
 
 	/* Ensure it is out of the tlb too. */
-	tlbie(va, psize, 0);
+	tlbie(va, psize, ssize, 0);
 }
 
 static void native_hpte_invalidate(unsigned long slot, unsigned long va,
-				   int psize, int local)
+				   int psize, int ssize, int local)
 {
 	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v;
@@ -323,7 +321,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 
 	DBG_LOW("    invalidate(va=%016lx, hash: %x)\n", va, slot);
 
-	want_v = hpte_encode_v(va, psize);
+	want_v = hpte_encode_v(va, psize, ssize);
 	native_lock_hpte(hptep);
 	hpte_v = hptep->v;
@@ -335,7 +333,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 		hptep->v = 0;
 
 	/* Invalidate the TLB */
-	tlbie(va, psize, local);
+	tlbie(va, psize, ssize, local);
 
 	local_irq_restore(flags);
 }
@@ -345,7 +343,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 #define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
 
 static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
-			int *psize, unsigned long *va)
+			int *psize, int *ssize, unsigned long *va)
 {
 	unsigned long hpte_r = hpte->r;
 	unsigned long hpte_v = hpte->v;
@@ -401,6 +399,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 
 	*va = avpn;
 	*psize = size;
+	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
 }
 
 /*
@@ -417,7 +416,7 @@ static void native_hpte_clear(void)
 	struct hash_pte *hptep = htab_address;
 	unsigned long hpte_v, va;
 	unsigned long pteg_count;
-	int psize;
+	int psize, ssize;
 
 	pteg_count = htab_hash_mask + 1;
 
@@ -443,9 +442,9 @@ static void native_hpte_clear(void)
 		 * already hold the native_tlbie_lock.
 		 */
 		if (hpte_v & HPTE_V_VALID) {
-			hpte_decode(hptep, slot, &psize, &va);
+			hpte_decode(hptep, slot, &psize, &ssize, &va);
 			hptep->v = 0;
-			__tlbie(va, psize);
+			__tlbie(va, psize, ssize);
 		}
 	}
 
@@ -468,6 +467,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 	real_pte_t pte;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 	unsigned long psize = batch->psize;
+	int ssize = batch->ssize;
 	int i;
 
 	local_irq_save(flags);
@@ -477,14 +477,14 @@ static void native_flush_hash_range(unsigned long number, int local)
 		pte = batch->pte[i];
 
 		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
-			hash = hpt_hash(va, shift);
+			hash = hpt_hash(va, shift, ssize);
 			hidx = __rpte_to_hidx(pte, index);
 			if (hidx & _PTEIDX_SECONDARY)
 				hash = ~hash;
 			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 			slot += hidx & _PTEIDX_GROUP_IX;
 			hptep = htab_address + slot;
-			want_v = hpte_encode_v(va, psize);
+			want_v = hpte_encode_v(va, psize, ssize);
 			native_lock_hpte(hptep);
 			hpte_v = hptep->v;
 			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -504,7 +504,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 
 			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
-				__tlbiel(va, psize);
+				__tlbiel(va, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
 		asm volatile("ptesync":::"memory");
@@ -521,7 +521,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 
 			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
-				__tlbie(va, psize);
+				__tlbie(va, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a47151e..611ad08 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -49,7 +49,6 @@
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
 #include <asm/cputable.h>
-#include <asm/abs_addr.h>
 #include <asm/sections.h>
 #include <asm/spu.h>
 
@@ -94,6 +93,8 @@ int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
 int mmu_vmalloc_psize = MMU_PAGE_4K;
 int mmu_io_psize = MMU_PAGE_4K;
+int mmu_kernel_ssize = MMU_SEGSIZE_256M;
+int mmu_highuser_ssize = MMU_SEGSIZE_256M;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
@@ -146,7 +147,8 @@ struct mmu_psize_def mmu_psize_defaults_gp[] = {
 
 
 int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
-		      unsigned long pstart, unsigned long mode, int psize)
+		      unsigned long pstart, unsigned long mode,
+		      int psize, int ssize)
 {
 	unsigned long vaddr, paddr;
 	unsigned int step, shift;
@@ -159,8 +161,8 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 	for (vaddr = vstart, paddr = pstart; vaddr < vend;
 	     vaddr += step, paddr += step) {
 		unsigned long hash, hpteg;
-		unsigned long vsid = get_kernel_vsid(vaddr);
-		unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
+		unsigned long va = hpt_va(vaddr, vsid, ssize);
 
 		tmp_mode = mode;
 
@@ -168,14 +170,14 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		if (!in_kernel_text(vaddr))
 			tmp_mode = mode | HPTE_R_N;
 
-		hash = hpt_hash(va, shift);
+		hash = hpt_hash(va, shift, ssize);
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
 		DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);
 
 		BUG_ON(!ppc_md.hpte_insert);
 		ret = ppc_md.hpte_insert(hpteg, va, paddr,
-				tmp_mode, HPTE_V_BOLTED, psize);
+				tmp_mode, HPTE_V_BOLTED, psize, ssize);
 
 		if (ret < 0)
 			break;
@@ -187,6 +189,37 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 	return ret < 0 ? ret : 0;
 }
 
+static int __init htab_dt_scan_seg_sizes(unsigned long node,
+					 const char *uname, int depth,
+					 void *data)
+{
+	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+	u32 *prop;
+	unsigned long size = 0;
+
+	/* We are scanning "cpu" nodes only */
+	if (type == NULL || strcmp(type, "cpu") != 0)
+		return 0;
+
+	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
+					  &size);
+	if (prop == NULL)
+		return 0;
+	for (; size >= 4; size -= 4, ++prop) {
+		if (prop[0] == 40) {
+			DBG("1T segment support detected\n");
+			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static void __init htab_init_seg_sizes(void)
+{
+	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
+}
+
 static int __init htab_dt_scan_page_sizes(unsigned long node,
 					  const char *uname, int depth,
 					  void *data)
@@ -266,7 +299,6 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 	return 0;
 }
 
-
 static void __init htab_init_page_sizes(void)
 {
 	int rc;
@@ -399,7 +431,7 @@ void create_section_mapping(unsigned long start, unsigned long end)
 {
 	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
 			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
-			mmu_linear_psize));
+			mmu_linear_psize, mmu_kernel_ssize));
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
@@ -450,9 +482,18 @@ void __init htab_initialize(void)
 
 	DBG(" -> htab_initialize()\n");
 
+	/* Initialize segment sizes */
+	htab_init_seg_sizes();
+
 	/* Initialize page sizes */
 	htab_init_page_sizes();
 
+	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+		mmu_kernel_ssize = MMU_SEGSIZE_1T;
+		mmu_highuser_ssize = MMU_SEGSIZE_1T;
+		printk(KERN_INFO "Using 1TB segments\n");
+	}
+
 	/*
 	 * Calculate the required size of the htab.  We want the number of
 	 * PTEGs to equal one half the number of real pages.
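
htab_initialize() above only flips the kernel and high-user defaults to 1T when the CPU advertises the feature via the device tree; which segment size a given user address actually gets is decided per-address by user_segment_size(), which the hash_page(), hash_preload(), hugetlbpage.c, slb.c and tlb_64.c hunks below all call. That helper is added outside this directory (in include/asm-powerpc/mmu-hash64.h); a minimal sketch, assuming SID_SHIFT_1T == 40:

	/* Addresses at or above 1TB use the configured high-user segment
	 * size; everything below stays in 256MB segments, so the bottom
	 * of the address space keeps its existing layout. */
	static inline int user_segment_size(unsigned long addr)
	{
		if (addr >= (1UL << SID_SHIFT_1T))	/* >= 1TB */
			return mmu_highuser_ssize;
		return MMU_SEGSIZE_256M;
	}
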
@@ -524,18 +565,20 @@ void __init htab_initialize(void)
 			if (base != dart_tablebase)
 				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
 							__pa(base), mode_rw,
-							mmu_linear_psize));
+							mmu_linear_psize,
+							mmu_kernel_ssize));
 			if ((base + size) > dart_table_end)
 				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
 							base + size,
 							__pa(dart_table_end),
 							mode_rw,
-							mmu_linear_psize));
+							mmu_linear_psize,
+							mmu_kernel_ssize));
 			continue;
 		}
 #endif /* CONFIG_U3_DART */
 		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
-				mode_rw, mmu_linear_psize));
+				mode_rw, mmu_linear_psize, mmu_kernel_ssize));
 	}
 
 	/*
@@ -554,7 +597,7 @@ void __init htab_initialize(void)
 
 		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
 					 __pa(tce_alloc_start), mode_rw,
-					 mmu_linear_psize));
+					 mmu_linear_psize, mmu_kernel_ssize));
 	}
 
 	htab_finish_init();
@@ -602,13 +645,7 @@ static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
 {
 	if (mm->context.user_psize == MMU_PAGE_4K)
 		return;
-#ifdef CONFIG_PPC_MM_SLICES
 	slice_set_user_psize(mm, MMU_PAGE_4K);
-#else /* CONFIG_PPC_MM_SLICES */
-	mm->context.user_psize = MMU_PAGE_4K;
-	mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
-#endif /* CONFIG_PPC_MM_SLICES */
-
 #ifdef CONFIG_SPU_BASE
 	spu_flush_all_slbs(mm);
 #endif
@@ -628,7 +665,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	pte_t *ptep;
 	cpumask_t tmp;
 	int rc, user_region = 0, local = 0;
-	int psize;
+	int psize, ssize;
 
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
 		ea, access, trap);
@@ -647,20 +684,22 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			DBG_LOW(" user region with no mm !\n");
 			return 1;
 		}
-		vsid = get_vsid(mm->context.id, ea);
 #ifdef CONFIG_PPC_MM_SLICES
 		psize = get_slice_psize(mm, ea);
 #else
 		psize = mm->context.user_psize;
 #endif
+		ssize = user_segment_size(ea);
+		vsid = get_vsid(mm->context.id, ea, ssize);
 		break;
 	case VMALLOC_REGION_ID:
 		mm = &init_mm;
-		vsid = get_kernel_vsid(ea);
+		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
 		if (ea < VMALLOC_END)
 			psize = mmu_vmalloc_psize;
 		else
 			psize = mmu_io_psize;
+		ssize = mmu_kernel_ssize;
 		break;
 	default:
 		/* Not a valid range
@@ -765,10 +804,10 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 
 #ifdef CONFIG_PPC_HAS_HASH_64K
 	if (psize == MMU_PAGE_64K)
-		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
+		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
 	else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
-		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
+		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);
 
 #ifndef CONFIG_PPC_64K_PAGES
 	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
@@ -790,6 +829,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	cpumask_t mask;
 	unsigned long flags;
 	int local = 0;
+	int ssize;
 
 	BUG_ON(REGION_ID(ea) != USER_REGION_ID);
 
@@ -822,7 +862,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 #endif /* CONFIG_PPC_64K_PAGES */
 
 	/* Get VSID */
-	vsid = get_vsid(mm->context.id, ea);
+	ssize = user_segment_size(ea);
+	vsid = get_vsid(mm->context.id, ea, ssize);
 
 	/* Hash doesn't like irqs */
 	local_irq_save(flags);
@@ -835,28 +876,29 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	/* Hash it in */
 #ifdef CONFIG_PPC_HAS_HASH_64K
 	if (mm->context.user_psize == MMU_PAGE_64K)
-		__hash_page_64K(ea, access, vsid, ptep, trap, local);
+		__hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
 	else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
-		__hash_page_4K(ea, access, vsid, ptep, trap, local);
+		__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);
 
 	local_irq_restore(flags);
 }
 
-void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
+void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
+		     int local)
 {
 	unsigned long hash, index, shift, hidx, slot;
 
 	DBG_LOW("flush_hash_page(va=%016x)\n", va);
 	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
-		hash = hpt_hash(va, shift);
+		hash = hpt_hash(va, shift, ssize);
 		hidx = __rpte_to_hidx(pte, index);
 		if (hidx & _PTEIDX_SECONDARY)
 			hash = ~hash;
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += hidx & _PTEIDX_GROUP_IX;
 		DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
-		ppc_md.hpte_invalidate(slot, va, psize, local);
+		ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
 	} pte_iterate_hashed_end();
 }
 
@@ -871,7 +913,7 @@ void flush_hash_range(unsigned long number, int local)
 
 		for (i = 0; i < number; i++)
 			flush_hash_page(batch->vaddr[i], batch->pte[i],
-					batch->psize, local);
+					batch->psize, batch->ssize, local);
 	}
 }
 
@@ -897,17 +939,19 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 {
-	unsigned long hash, hpteg, vsid = get_kernel_vsid(vaddr);
-	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+	unsigned long hash, hpteg;
+	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
+	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
 	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
 			_PAGE_COHERENT | PP_RWXX | HPTE_R_N;
 	int ret;
 
-	hash = hpt_hash(va, PAGE_SHIFT);
+	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
 	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
 	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
-				 mode, HPTE_V_BOLTED, mmu_linear_psize);
+				 mode, HPTE_V_BOLTED,
+				 mmu_linear_psize, mmu_kernel_ssize);
 	BUG_ON (ret < 0);
 	spin_lock(&linear_map_hash_lock);
 	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
@@ -917,10 +961,11 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 
 static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 {
-	unsigned long hash, hidx, slot, vsid = get_kernel_vsid(vaddr);
-	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+	unsigned long hash, hidx, slot;
+	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
+	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
 
-	hash = hpt_hash(va, PAGE_SHIFT);
+	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
 	spin_lock(&linear_map_hash_lock);
 	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
 	hidx = linear_map_hash_slots[lmi] & 0x7f;
@@ -930,7 +975,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 		hash = ~hash;
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 	slot += hidx & _PTEIDX_GROUP_IX;
-	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, 0);
+	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
 }
 
 void kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 4835f73..08f0d9f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -22,11 +22,8 @@
 #include <asm/mmu_context.h>
 #include <asm/machdep.h>
 #include <asm/cputable.h>
-#include <asm/tlb.h>
 #include <asm/spu.h>
 
-#include <linux/sysctl.h>
-
 #define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
 #define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
@@ -406,11 +403,12 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	unsigned long va, rflags, pa;
 	long slot;
 	int err = 1;
+	int ssize = user_segment_size(ea);
 
 	ptep = huge_pte_offset(mm, ea);
 
 	/* Search the Linux page table for a match with va */
-	va = (vsid << 28) | (ea & 0x0fffffff);
+	va = hpt_va(ea, vsid, ssize);
 
 	/*
 	 * If no pte found or not present, send the problem up to
@@ -461,19 +459,19 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 			/* There MIGHT be an HPTE for this pte */
 			unsigned long hash, slot;
 
-			hash = hpt_hash(va, HPAGE_SHIFT);
+			hash = hpt_hash(va, HPAGE_SHIFT, ssize);
 			if (old_pte & _PAGE_F_SECOND)
 				hash = ~hash;
 			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 			slot += (old_pte & _PAGE_F_GIX) >> 12;
 
 			if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
-						 local) == -1)
+						 ssize, local) == -1)
 				old_pte &= ~_PAGE_HPTEFLAGS;
 		}
 
 		if (likely(!(old_pte & _PAGE_HASHPTE))) {
-			unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
+			unsigned long hash = hpt_hash(va, HPAGE_SHIFT, ssize);
 			unsigned long hpte_group;
 
 			pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
@@ -492,7 +490,7 @@ repeat:
 
 		/* Insert into the hash table, primary slot */
 		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
-					  mmu_huge_psize);
+					  mmu_huge_psize, ssize);
 
 		/* Primary is full, try the secondary */
 		if (unlikely(slot == -1)) {
@@ -500,7 +498,7 @@ repeat:
 				      HPTES_PER_GROUP) & ~0x7UL;
 			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
 						  HPTE_V_SECONDARY,
-						  mmu_huge_psize);
+						  mmu_huge_psize, ssize);
 			if (slot == -1) {
 				if (mftb() & 0x1)
 					hpte_group = ((hash & htab_hash_mask) *
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index e1f5ded..977cb1e 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -41,7 +41,6 @@
 #include <asm/machdep.h>
 #include <asm/btext.h>
 #include <asm/tlb.h>
-#include <asm/prom.h>
 #include <asm/lmb.h>
 #include <asm/sections.h>
 
@@ -133,6 +132,9 @@ void __init MMU_init(void)
 	/* 601 can only access 16MB at the moment */
 	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
 		__initial_memory_limit = 0x01000000;
+	/* 8xx can only access 8MB at the moment */
+	if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
+		__initial_memory_limit = 0x00800000;
 
 	/* parse args from command line */
 	MMU_setup();
@@ -256,3 +258,40 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	}
 }
 #endif
+
+#ifdef CONFIG_PROC_KCORE
+static struct kcore_list kcore_vmem;
+
+static int __init setup_kcore(void)
+{
+	int i;
+
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		unsigned long base;
+		unsigned long size;
+		struct kcore_list *kcore_mem;
+
+		base = lmb.memory.region[i].base;
+		size = lmb.memory.region[i].size;
+
+		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
+		if (!kcore_mem)
+			panic("%s: kmalloc failed\n", __FUNCTION__);
+
+		/* must stay under 32 bits */
+		if ( 0xfffffffful - (unsigned long)__va(base) < size) {
+			size = 0xfffffffful - (unsigned long)(__va(base));
+			printk(KERN_DEBUG "setup_kcore: restrict size=%lx\n",
+				size);
+		}
+
+		kclist_add(kcore_mem, __va(base), size);
+	}
+
+	kclist_add(&kcore_vmem, (void *)VMALLOC_START,
+		VMALLOC_END-VMALLOC_START);
+
+	return 0;
+}
+module_init(setup_kcore);
+#endif
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9f27bb5..fa90f65 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -113,6 +113,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
+#ifdef CONFIG_PROC_KCORE
 static struct kcore_list kcore_vmem;
 
 static int __init setup_kcore(void)
@@ -139,6 +140,7 @@ static int __init setup_kcore(void)
 	return 0;
 }
 module_init(setup_kcore);
+#endif
 
 static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index f0e7eed..32dcfc9 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -42,7 +42,6 @@
 #include <asm/machdep.h>
 #include <asm/btext.h>
 #include <asm/tlb.h>
-#include <asm/prom.h>
 #include <asm/lmb.h>
 #include <asm/sections.h>
 #include <asm/vdso.h>
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
index 7a78cdc..1db38ba 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -28,7 +28,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int index;
 	int err;
-	int new_context = (mm->context.id == 0);
 
 again:
 	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
@@ -50,19 +49,13 @@ again:
 		return -ENOMEM;
 	}
 
-	mm->context.id = index;
-#ifdef CONFIG_PPC_MM_SLICES
 	/* The old code would re-promote on fork, we don't do that
 	 * when using slices as it could cause problem promoting slices
 	 * that have been forced down to 4K
 	 */
-	if (new_context)
+	if (slice_mm_new_context(mm))
 		slice_set_user_psize(mm, mmu_virtual_psize);
-#else
-	mm->context.user_psize = mmu_virtual_psize;
-	mm->context.sllp = SLB_VSID_USER |
-		mmu_psize_defs[mmu_virtual_psize].sllp;
-#endif
+	mm->context.id = index;
 
 	return 0;
 }
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 3dfd10d..3ef0ad2 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -87,8 +87,8 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 	 * entry in the hardware page table.
 	 *
 	 */
-	if (htab_bolt_mapping(ea, (unsigned long)ea + PAGE_SIZE,
-			      pa, flags, mmu_io_psize)) {
+	if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
+			      mmu_io_psize, mmu_kernel_ssize)) {
 		printk(KERN_ERR "Failed to do bolted mapping IO "
 		       "memory at %016lx !\n", pa);
 		return -ENOMEM;
@@ -228,5 +228,7 @@ void iounmap(volatile void __iomem *token)
 EXPORT_SYMBOL(ioremap);
 EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__ioremap_at);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(__iounmap_at);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index ff1811a..6c164ce 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -43,30 +43,37 @@ static void slb_allocate(unsigned long ea)
 	slb_allocate_realmode(ea);
 }
 
-static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
+static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
+					 unsigned long slot)
 {
-	return (ea & ESID_MASK) | SLB_ESID_V | slot;
+	unsigned long mask;
+
+	mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
+	return (ea & mask) | SLB_ESID_V | slot;
 }
 
-static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
+#define slb_vsid_shift(ssize)	\
+	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
+
+static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
+					 unsigned long flags)
 {
-	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
+	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
+		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
 }
 
-static inline void slb_shadow_update(unsigned long ea,
+static inline void slb_shadow_update(unsigned long ea, int ssize,
 				     unsigned long flags,
 				     unsigned long entry)
 {
 	/*
 	 * Clear the ESID first so the entry is not valid while we are
-	 * updating it.
+	 * updating it.  No write barriers are needed here, provided
+	 * we only update the current CPU's SLB shadow buffer.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	smp_wmb();
-	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
-	smp_wmb();
-	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
-	smp_wmb();
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
 }
 
 static inline void slb_shadow_clear(unsigned long entry)
@@ -74,7 +81,8 @@ static inline void slb_shadow_clear(unsigned long entry)
 	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
-static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
+static inline void create_shadowed_slbe(unsigned long ea, int ssize,
+					unsigned long flags,
 					unsigned long entry)
 {
 	/*
@@ -82,11 +90,11 @@ static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(ea, flags, entry);
+	slb_shadow_update(ea, ssize, flags, entry);
 
 	asm volatile("slbmte  %0,%1" :
-		     : "r" (mk_vsid_data(ea, flags)),
-		       "r" (mk_esid_data(ea, entry))
+		     : "r" (mk_vsid_data(ea, ssize, flags)),
+		       "r" (mk_esid_data(ea, ssize, entry))
		     : "memory" );
 }
 
@@ -95,7 +103,7 @@ void slb_flush_and_rebolt(void)
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
 	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
-	unsigned long ksp_esid_data;
+	unsigned long ksp_esid_data, ksp_vsid_data;
 
 	WARN_ON(!irqs_disabled());
 
@@ -104,13 +112,15 @@ void slb_flush_and_rebolt(void)
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) {
+	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
+	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
 		ksp_esid_data &= ~SLB_ESID_V;
+		ksp_vsid_data = 0;
 		slb_shadow_clear(2);
 	} else {
 		/* Update stack entry; others don't change */
-		slb_shadow_update(get_paca()->kstack, lflags, 2);
+		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
+		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
 	}
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -122,9 +132,9 @@ void slb_flush_and_rebolt(void)
 		     /* Slot 2 - kernel stack */
 		     "slbmte	%2,%3\n"
 		     "isync"
-		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
-		        "r"(mk_esid_data(VMALLOC_START, 1)),
-		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
+		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
+		        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
+		        "r"(ksp_vsid_data),
 		        "r"(ksp_esid_data)
 		     : "memory");
 }
@@ -134,7 +144,7 @@ void slb_vmalloc_update(void)
 	unsigned long vflags;
 
 	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
-	slb_shadow_update(VMALLOC_START, vflags, 1);
+	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 	slb_flush_and_rebolt();
 }
 
@@ -142,7 +152,7 @@ void slb_vmalloc_update(void)
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
 	unsigned long offset = get_paca()->slb_cache_ptr;
-	unsigned long esid_data = 0;
+	unsigned long slbie_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
@@ -151,9 +161,12 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		int i;
 		asm volatile("isync" : : : "memory");
 		for (i = 0; i < offset; i++) {
-			esid_data = ((unsigned long)get_paca()->slb_cache[i]
-				<< SID_SHIFT) | SLBIE_C;
-			asm volatile("slbie %0" : : "r" (esid_data));
+			slbie_data = (unsigned long)get_paca()->slb_cache[i]
+				<< SID_SHIFT; /* EA */
+			slbie_data |= user_segment_size(slbie_data)
+				<< SLBIE_SSIZE_SHIFT;
+			slbie_data |= SLBIE_C; /* C set for user addresses */
+			asm volatile("slbie %0" : : "r" (slbie_data));
 		}
 		asm volatile("isync" : : : "memory");
 	} else {
@@ -162,7 +175,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 
 	/* Workaround POWER5 < DD2.1 issue */
 	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
-		asm volatile("slbie %0" : : "r" (esid_data));
+		asm volatile("slbie %0" : : "r" (slbie_data));
 
 	get_paca()->slb_cache_ptr = 0;
 	get_paca()->context = mm->context;
@@ -245,9 +258,9 @@ void slb_initialize(void)
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
+	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
 
-	create_shadowed_slbe(VMALLOC_START, vflags, 1);
+	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment.  By the time it goes
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index cd1a93d..1328a81 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -57,7 +57,10 @@ _GLOBAL(slb_allocate_realmode)
 	 */
 _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
+BEGIN_FTR_SECTION
 	b	slb_finish_load
+END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+	b	slb_finish_load_1T
 
 1:	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
 	 * will be patched by the kernel at boot
@@ -68,13 +71,16 @@ BEGIN_FTR_SECTION
 	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
 	bgt	5f
 	lhz	r11,PACAVMALLOCSLLP(r13)
-	b	slb_finish_load
+	b	6f
 5:
 END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 _GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
+6:
+BEGIN_FTR_SECTION
 	b	slb_finish_load
-
+END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+	b	slb_finish_load_1T
 
 0:	/* user address: proto-VSID = context << 15 | ESID. First check
 	 * if the address is within the boundaries of the user region
@@ -122,7 +128,13 @@ _GLOBAL(slb_miss_kernel_load_io)
 #endif /* CONFIG_PPC_MM_SLICES */
 
 	ld	r9,PACACONTEXTID(r13)
+BEGIN_FTR_SECTION
+	cmpldi	r10,0x1000
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	rldimi	r10,r9,USER_ESID_BITS,0
+BEGIN_FTR_SECTION
+	bge	slb_finish_load_1T
+END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
@@ -188,7 +200,7 @@ _GLOBAL(slb_allocate_user)
  * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
-	ASM_VSID_SCRAMBLE(r10,r9)
+	ASM_VSID_SCRAMBLE(r10,r9,256M)
 	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
 
 	/* r3 = EA, r11 = VSID data */
@@ -213,7 +225,7 @@ BEGIN_FW_FTR_SECTION
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif /* CONFIG_PPC_ISERIES */
 
-	ld	r10,PACASTABRR(r13)
+7:	ld	r10,PACASTABRR(r13)
 	addi	r10,r10,1
 	/* use a cpu feature mask if we ever change our slb size */
 	cmpldi	r10,SLB_NUM_ENTRIES
@@ -259,3 +271,20 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 	crclr	4*cr0+eq		/* set result to "success" */
 	blr
 
+/*
+ * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
+ * We assume legacy iSeries will never have 1T segments.
+ *
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
+ */
+slb_finish_load_1T:
+	srdi	r10,r10,40-28		/* get 1T ESID */
+	ASM_VSID_SCRAMBLE(r10,r9,1T)
+	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
+	li	r10,MMU_SEGSIZE_1T
+	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */
+
+	/* r3 = EA, r11 = VSID data */
+	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
+	b	7b
+
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index d5fd390..319826e 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -551,6 +551,7 @@ EXPORT_SYMBOL_GPL(get_slice_psize);
  *
  * This is also called in init_new_context() to change back the user
  * psize from whatever the parent context had it set to
+ * N.B. This may be called before mm->context.id has been set.
  *
  * This function will only change the content of the {low,high)_slice_psize
  * masks, it will not flush SLBs as this shall be handled lazily by the
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 28492bb..9e85bda 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -122,12 +122,12 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
 
 	/* Kernel or user address? */
 	if (is_kernel_addr(ea)) {
-		vsid = get_kernel_vsid(ea);
+		vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
 	} else {
 		if ((ea >= TASK_SIZE_USER64) || (! mm))
 			return 1;
 
-		vsid = get_vsid(mm->context.id, ea);
+		vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M);
 	}
 
 	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
@@ -261,7 +261,7 @@ void __init stabs_alloc(void)
  */
 void stab_initialize(unsigned long stab)
 {
-	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
+	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M);
 	unsigned long stabreal;
 
 	asm volatile("isync; slbia; isync":::"memory");
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index cbd34fc..eafbca5 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -132,6 +132,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 	unsigned long vsid, vaddr;
 	unsigned int psize;
+	int ssize;
 	real_pte_t rpte;
 	int i;
 
@@ -161,11 +162,14 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 
 	/* Build full vaddr */
 	if (!is_kernel_addr(addr)) {
-		vsid = get_vsid(mm->context.id, addr);
+		ssize = user_segment_size(addr);
+		vsid = get_vsid(mm->context.id, addr, ssize);
 		WARN_ON(vsid == 0);
-	} else
-		vsid = get_kernel_vsid(addr);
-	vaddr = (vsid << 28 ) | (addr & 0x0fffffff);
+	} else {
+		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+		ssize = mmu_kernel_ssize;
+	}
+	vaddr = hpt_va(addr, vsid, ssize);
 	rpte = __real_pte(__pte(pte), ptep);
 
 	/*
@@ -175,7 +179,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	 * and decide to use local invalidates instead...
 	 */
 	if (!batch->active) {
-		flush_hash_page(vaddr, rpte, psize, 0);
+		flush_hash_page(vaddr, rpte, psize, ssize, 0);
 		return;
 	}
 
@@ -189,13 +193,15 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	 * We also need to ensure only one page size is present in a given
 	 * batch
 	 */
-	if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
+	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
+		       batch->ssize != ssize)) {
 		__flush_tlb_pending(batch);
 		i = 0;
 	}
 	if (i == 0) {
 		batch->mm = mm;
 		batch->psize = psize;
+		batch->ssize = ssize;
 	}
 	batch->pte[i] = rpte;
 	batch->vaddr[i] = vaddr;
@@ -222,7 +228,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 		local = 1;
 	if (i == 1)
 		flush_hash_page(batch->vaddr[0], batch->pte[0],
-				batch->psize, local);
+				batch->psize, batch->ssize, local);
 	else
 		flush_hash_range(i, local);
 	batch->index = 0;
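
hpte_decode() in hash_native_64.c above recovers the segment size from the top bits of the HPTE's first doubleword (*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT, with HPTE_V_SSIZE_SHIFT == 62). Its counterpart hpte_encode_v(), which every native_hpte_* function now passes ssize to, is likewise defined in include/asm-powerpc/mmu-hash64.h; a simplified sketch of the encoding side:

	static inline unsigned long hpte_encode_v(unsigned long va, int psize,
						  int ssize)
	{
		unsigned long v;

		/* AVPN: the VA above bit 23, minus the bits that the
		 * large-page encoding reuses for this page size */
		v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
		v <<= HPTE_V_AVPN_SHIFT;
		if (psize != MMU_PAGE_4K)
			v |= HPTE_V_LARGE;
		/* segment size goes in the top two bits */
		v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
		return v;
	}

Because MMU_SEGSIZE_256M is 0, pre-existing 256MB hash PTEs (which always had those top bits clear) keep exactly the same encoding, so the change stays backward compatible with old hash tables.
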