Diffstat (limited to 'arch/powerpc/include/asm/book3s/64/pgtable.h')
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 86
1 file changed, 24 insertions, 62 deletions
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 2741ac6..ddc08bf 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -8,7 +8,6 @@
#include <asm/book3s/64/hash.h>
#include <asm/barrier.h>
-#define FIRST_USER_ADDRESS 0UL
/*
* Size of EA range mapped by our pagetables.
@@ -25,27 +24,16 @@
/*
* Define the address range of the kernel non-linear virtual area
*/
-
-#ifdef CONFIG_PPC_BOOK3E
-#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
-#else
#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
-#endif
#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
-
/*
* The vmalloc space starts at the beginning of that region, and
* occupies half of it on hash CPUs and a quarter of it on Book3E
* (we keep a quarter for the virtual memmap)
*/
#define VMALLOC_START KERN_VIRT_START
-#ifdef CONFIG_PPC_BOOK3E
-#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 2)
-#else
#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
-#endif
#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
-
/*
* The second half of the kernel virtual space is used for IO mappings,
* it's itself carved into the PIO region (ISA and PHB IO space) and
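With the Book3E variants removed above, the hash-only layout is fully fixed at build time. A minimal standalone sketch (plain userspace C, not kernel code) that derives the resulting boundaries from the defines in this hunk:

#include <stdio.h>

/* Values copied from the hunk above; KERN_VIRT_SIZE is 2^44 (16 TiB). */
#define KERN_VIRT_START 0xD000000000000000UL
#define KERN_VIRT_SIZE  0x0000100000000000UL
#define VMALLOC_START   KERN_VIRT_START
#define VMALLOC_SIZE    (KERN_VIRT_SIZE >> 1)   /* half for vmalloc on hash */
#define VMALLOC_END     (VMALLOC_START + VMALLOC_SIZE)

int main(void)
{
	/* Prints: vmalloc 0xd000000000000000..0xd000080000000000, 8 TiB */
	printf("vmalloc %#lx..%#lx, %lu TiB\n",
	       VMALLOC_START, VMALLOC_END, VMALLOC_SIZE >> 40);
	return 0;
}

The second half, from KERN_VIRT_START + (KERN_VIRT_SIZE >> 1) up to IOREMAP_END, is the IO-mapping space carved up in the next hunk.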
@@ -64,7 +52,6 @@
#define IOREMAP_BASE (PHB_IO_END)
#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE)
-
/*
* Region IDs
*/
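For context on these Region IDs: on hash CPUs the region ID is the top nibble of the effective address (REGION_SHIFT is 60), which is why VMEMMAP_BASE in the next hunk is just a region ID shifted into place. A sketch of the constants as they appear in the hash headers of this era (reproduced from memory, so treat as illustrative rather than authoritative):

#define REGION_SHIFT      (60UL)
#define REGION_ID(ea)     (((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))  /* 0xD */
#define KERNEL_REGION_ID  (REGION_ID(PAGE_OFFSET))    /* 0xC */
#define VMEMMAP_REGION_ID (0xfUL)   /* so VMEMMAP_BASE = 0xF000000000000000 */
#define USER_REGION_ID    (0UL)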
@@ -79,32 +66,39 @@
/*
* Defines the address of the vmemap area, in its own region on
- * hash table CPUs and after the vmalloc space on Book3E
+ * hash table CPUs.
*/
-#ifdef CONFIG_PPC_BOOK3E
-#define VMEMMAP_BASE VMALLOC_END
-#define VMEMMAP_END KERN_IO_START
-#else
#define VMEMMAP_BASE (VMEMMAP_REGION_ID << REGION_SHIFT)
-#endif
#define vmemmap ((struct page *)VMEMMAP_BASE)
-/*
- * Include the PTE bits definitions
- */
-#ifdef CONFIG_PPC_BOOK3S
-#include <asm/book3s/64/hash.h>
-#else
-#include <asm/pte-book3e.h>
-#endif
-#include <asm/pte-common.h>
-
#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */
+/*
+ * THP pages can't be special. So use the _PAGE_SPECIAL
+ */
+#define _PAGE_SPLITTING _PAGE_SPECIAL
+
+/*
+ * We need to differentiate between explicit huge page and THP huge
+ * page, since THP huge page also need to track real subpage details
+ */
+#define _PAGE_THP_HUGE _PAGE_4K_PFN
+
+/*
+ * set of bits not changed in pmd_modify.
+ */
+#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | \
+ _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
+ _PAGE_THP_HUGE)
+/*
+ * Default defines for things which we don't use.
+ * We should get this removed.
+ */
+#include <asm/pte-common.h>
#ifndef __ASSEMBLY__
/*
@@ -144,7 +138,7 @@
#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
-#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval))
+#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
|| (pmd_val(pmd) & PMD_BAD_BITS))
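The _HPAGE_CHG_MASK moved into this header above is the set of PMD bits that must survive a protection change: the physical page number, the HPTE tracking flags, dirty/accessed, and the two THP markers. Its consumer, pmd_modify(), lives elsewhere in this header family and is essentially the following (a sketch, not part of this diff):

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	/* Keep PFN, HPTE flags, dirty/accessed and the THP/splitting
	 * markers; replace everything else with the new protection. */
	return __pmd((pmd_val(pmd) & _HPAGE_CHG_MASK) | pgprot_val(newprot));
}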
@@ -206,7 +200,6 @@ static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long set,
int huge)
{
-#ifdef PTE_ATOMIC_UPDATES
unsigned long old, tmp;
__asm__ __volatile__(
@@ -220,18 +213,12 @@ static inline unsigned long pte_update(struct mm_struct *mm,
: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
: "cc" );
-#else
- unsigned long old = pte_val(*ptep);
- *ptep = __pte((old & ~clr) | set);
-#endif
/* huge pages use the old page table lock */
if (!huge)
assert_pte_locked(mm, addr);
-#ifdef CONFIG_PPC_STD_MMU_64
if (old & _PAGE_HASHPTE)
hpte_need_flush(mm, addr, ptep, old, huge);
-#endif
return old;
}
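With the non-atomic fallback deleted, pte_update() is always the ldarx/stdcx. load-reserve/store-conditional loop shown above. Minus the _PAGE_BUSY spin, its effect can be sketched as a standalone C11 compare-and-swap loop (hypothetical helper, not kernel code):

#include <stdatomic.h>
#include <stdint.h>

static uint64_t pte_update_sketch(_Atomic uint64_t *ptep,
				  uint64_t clr, uint64_t set)
{
	uint64_t old = atomic_load(ptep);
	/* A failed CAS refreshes 'old' and retries, like a failed stdcx.;
	 * the real loop additionally spins while _PAGE_BUSY is set. */
	while (!atomic_compare_exchange_weak(ptep, &old, (old & ~clr) | set))
		;
	return old;	/* caller checks e.g. _PAGE_HASHPTE in the old value */
}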
@@ -313,7 +300,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
unsigned long bits = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-#ifdef PTE_ATOMIC_UPDATES
unsigned long old, tmp;
__asm__ __volatile__(
@@ -326,10 +312,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
:"cc");
-#else
- unsigned long old = pte_val(*ptep);
- *ptep = __pte(old | bits);
-#endif
}
#define __HAVE_ARCH_PTE_SAME
@@ -367,28 +349,8 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
-#endif /* __ASSEMBLY__ */
/*
- * THP pages can't be special. So use the _PAGE_SPECIAL
- */
-#define _PAGE_SPLITTING _PAGE_SPECIAL
-
-/*
- * We need to differentiate between explicit huge page and THP huge
- * page, since THP huge page also need to track real subpage details
- */
-#define _PAGE_THP_HUGE _PAGE_4K_PFN
-
-/*
- * set of bits not changed in pmd_modify.
- */
-#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | \
- _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
- _PAGE_THP_HUGE)
-
-#ifndef __ASSEMBLY__
-/*
* The linux hugepage PMD now include the pmd entries followed by the address
* to the stashed pgtable_t. The stashed pgtable_t contains the hpte bits.
* [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte per
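The comment above (truncated in this view) goes on to say that one byte is stashed per HPTE entry. Reading the documented layout MSB-first, a byte decodes as: bit 7 secondary hash, bits 6-4 hidx (slot within the HPTE group), bit 3 valid, bits 2-0 unused. Hypothetical decoding helpers under that assumption:

#include <stdbool.h>
#include <stdint.h>

static inline bool    hpte_is_valid(uint8_t b)     { return (b >> 3) & 0x1; }
static inline uint8_t hpte_hidx(uint8_t b)         { return (b >> 4) & 0x7; }
static inline bool    hpte_is_secondary(uint8_t b) { return (b >> 7) & 0x1; }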