Diffstat (limited to 'arch/arm/include/asm')
 arch/arm/include/asm/arch_timer.h           |  9
 arch/arm/include/asm/assembler.h            | 17
 arch/arm/include/asm/cp15.h                 |  5
 arch/arm/include/asm/cputype.h              | 45
 arch/arm/include/asm/glue-cache.h           | 27
 arch/arm/include/asm/glue-df.h              |  8
 arch/arm/include/asm/glue-proc.h            |  9
 arch/arm/include/asm/hugetlb-3level.h       | 71
 arch/arm/include/asm/hugetlb.h              | 84
 arch/arm/include/asm/irqflags.h             | 22
 arch/arm/include/asm/mach/arch.h            |  5
 arch/arm/include/asm/memory.h               | 18
 arch/arm/include/asm/mpu.h                  | 76
 arch/arm/include/asm/page.h                 |  2
 arch/arm/include/asm/pgtable-3level-hwdef.h | 24
 arch/arm/include/asm/pgtable-3level.h       | 96
 arch/arm/include/asm/pgtable.h              |  3
 arch/arm/include/asm/proc-fns.h             | 30
 arch/arm/include/asm/psci.h                 |  9
 arch/arm/include/asm/ptrace.h               |  4
 arch/arm/include/asm/smp.h                  |  5
 arch/arm/include/asm/smp_plat.h             | 22
 arch/arm/include/asm/suspend.h              |  5
 arch/arm/include/asm/system_info.h          |  1
 arch/arm/include/asm/tlb.h                  |  6
 arch/arm/include/asm/tlbflush.h             | 27
 arch/arm/include/asm/v7m.h                  | 44
 27 files changed, 641 insertions(+), 33 deletions(-)
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 7c1bfc0..accefe0 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -80,15 +80,6 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline u64 arch_counter_get_cntpct(void)
-{
- u64 cval;
-
- isb();
- asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
- return cval;
-}
-
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
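The virtual counter accessor, truncated above, is the surviving twin of the removed physical-counter read. A minimal sketch of its body, assuming the usual CP15 64-bit c14 encoding in which opc1 = 1 selects CNTVCT (the removed CNTPCT read used opc1 = 0):

static inline u64 arch_counter_get_cntvct(void)
{
	u64 cval;

	isb();		/* order the counter read against earlier instructions */
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}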
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 05ee9ee..a5fef71 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -136,7 +136,11 @@
* assumes FIQs are enabled, and that the processor is in SVC mode.
*/
.macro save_and_disable_irqs, oldcpsr
+#ifdef CONFIG_CPU_V7M
+ mrs \oldcpsr, primask
+#else
mrs \oldcpsr, cpsr
+#endif
disable_irq
.endm
@@ -150,7 +154,11 @@
* guarantee that this will preserve the flags.
*/
.macro restore_irqs_notrace, oldcpsr
+#ifdef CONFIG_CPU_V7M
+ msr primask, \oldcpsr
+#else
msr cpsr_c, \oldcpsr
+#endif
.endm
.macro restore_irqs, oldcpsr
@@ -229,7 +237,14 @@
#endif
.endm
-#ifdef CONFIG_THUMB2_KERNEL
+#if defined(CONFIG_CPU_V7M)
+ /*
+	 * setmode is used to assert that the CPU is in SVC mode during boot.
+	 * For v7-M this is done in __v7m_setup, so setmode can be empty here.
+ */
+ .macro setmode, mode, reg
+ .endm
+#elif defined(CONFIG_THUMB2_KERNEL)
.macro setmode, mode, reg
mov \reg, #\mode
msr cpsr_c, \reg
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 1f3262e..a524a23 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -23,6 +23,11 @@
#define CR_RR (1 << 14) /* Round Robin cache replacement */
#define CR_L4 (1 << 15) /* LDR pc can set T bit */
#define CR_DT (1 << 16)
+#ifdef CONFIG_MMU
+#define CR_HA (1 << 17) /* Hardware management of Access Flag */
+#else
+#define CR_BR (1 << 17) /* MPU Background region enable (PMSA) */
+#endif
#define CR_IT (1 << 18)
#define CR_ST (1 << 19)
#define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */
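With this change, bit 17 of the SCTLR is interpreted per configuration: hardware Access Flag management on MMU systems, MPU background-region enable on PMSA systems. A hedged sketch of how CR_BR could be used with this header's existing get_cr()/set_cr() helpers (mpu_enable_background_region is hypothetical, not part of the patch):

static inline void mpu_enable_background_region(void)
{
	/* PMSA only: route otherwise-unmapped accesses through the
	 * default (background) memory map */
	set_cr(get_cr() | CR_BR);
}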
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index dba62cb..8c25dc4 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -8,8 +8,25 @@
#define CPUID_CACHETYPE 1
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3
+#define CPUID_MPUIR 4
#define CPUID_MPIDR 5
+#ifdef CONFIG_CPU_V7M
+#define CPUID_EXT_PFR0 0x40
+#define CPUID_EXT_PFR1 0x44
+#define CPUID_EXT_DFR0 0x48
+#define CPUID_EXT_AFR0 0x4c
+#define CPUID_EXT_MMFR0 0x50
+#define CPUID_EXT_MMFR1 0x54
+#define CPUID_EXT_MMFR2 0x58
+#define CPUID_EXT_MMFR3 0x5c
+#define CPUID_EXT_ISAR0 0x60
+#define CPUID_EXT_ISAR1 0x64
+#define CPUID_EXT_ISAR2 0x68
+#define CPUID_EXT_ISAR3 0x6c
+#define CPUID_EXT_ISAR4 0x70
+#define CPUID_EXT_ISAR5 0x74
+#else
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
#define CPUID_EXT_DFR0 "c1, 2"
@@ -24,6 +41,7 @@
#define CPUID_EXT_ISAR3 "c2, 3"
#define CPUID_EXT_ISAR4 "c2, 4"
#define CPUID_EXT_ISAR5 "c2, 5"
+#endif
#define MPIDR_SMP_BITMASK (0x3 << 30)
#define MPIDR_SMP_VALUE (0x2 << 30)
@@ -81,7 +99,23 @@ extern unsigned int processor_id;
__val; \
})
-#else /* ifdef CONFIG_CPU_CP15 */
+#elif defined(CONFIG_CPU_V7M)
+
+#include <asm/io.h>
+#include <asm/v7m.h>
+
+#define read_cpuid(reg) \
+ ({ \
+ WARN_ON_ONCE(1); \
+ 0; \
+ })
+
+static inline unsigned int __attribute_const__ read_cpuid_ext(unsigned offset)
+{
+ return readl(BASEADDR_V7M_SCB + offset);
+}
+
+#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */
/*
* read_cpuid and read_cpuid_ext should only ever be called on machines that
@@ -108,7 +142,14 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
return read_cpuid(CPUID_ID);
}
-#else /* ifdef CONFIG_CPU_CP15 */
+#elif defined(CONFIG_CPU_V7M)
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+ return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID);
+}
+
+#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */
static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
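On v7-M the CPUID_EXT_* values become byte offsets into the memory-mapped System Control Block instead of CP15 operand strings, so callers of read_cpuid_ext() stay source-compatible. A usage sketch (cpu_has_pmsav7 is a hypothetical helper; the MMFR0_* masks come from the new asm/mpu.h later in this series):

static inline int cpu_has_pmsav7(void)
{
	unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);

	/* PMSA field, ID_MMFR0[7:4]: 3 means PMSAv7 is implemented */
	return (mmfr0 & MMFR0_PMSA) == MMFR0_PMSAv7;
}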
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index ea289e1..c81adc0 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -117,10 +117,37 @@
# endif
#endif
+#if defined(CONFIG_CPU_V7M)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE nop
+# endif
+#endif
+
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
+#ifndef __ASSEMBLER__
+extern inline void nop_flush_icache_all(void) { }
+extern inline void nop_flush_kern_cache_all(void) { }
+extern inline void nop_flush_kern_cache_louis(void) { }
+extern inline void nop_flush_user_cache_all(void) { }
+extern inline void nop_flush_user_cache_range(unsigned long a,
+ unsigned long b, unsigned int c) { }
+
+extern inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
+extern inline int nop_coherent_user_range(unsigned long a,
+ unsigned long b) { return 0; }
+extern inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
+
+extern inline void nop_dma_flush_range(const void *a, const void *b) { }
+
+extern inline void nop_dma_map_area(const void *s, size_t l, int f) { }
+extern inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
+#endif
+
#ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
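For reference, the token-pasting glue (from asm/glue.h) that makes the nop model work: with a single cache model configured, _CACHE is a bare token and the kernel-wide entry points resolve at compile time to the empty inlines added above; under MULTI_CACHE the same names dispatch through the cpu_cache function-pointer table instead.

#define ____glue(name, fn)	name##fn
#define __glue(name, fn)	____glue(name, fn)

/*
 * With _CACHE == nop:
 *	__cpuc_flush_icache_all
 *	  -> __glue(nop, _flush_icache_all)
 *	  -> nop_flush_icache_all	(the empty inline above)
 */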
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index b6e9f2c..6b70f1b 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -95,6 +95,14 @@
# endif
#endif
+#ifdef CONFIG_CPU_ABRT_NOMMU
+# ifdef CPU_DABORT_HANDLER
+# define MULTI_DABORT 1
+# else
+# define CPU_DABORT_HANDLER nommu_early_abort
+# endif
+#endif
+
#ifndef CPU_DABORT_HANDLER
#error Unknown data abort handler type
#endif
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index 8017e94..74a8b84 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -230,6 +230,15 @@
# endif
#endif
+#ifdef CONFIG_CPU_V7M
+# ifdef CPU_NAME
+# undef MULTI_CPU
+# define MULTI_CPU
+# else
+# define CPU_NAME cpu_v7m
+# endif
+#endif
+
#ifdef CONFIG_CPU_PJ4B
# ifdef CPU_NAME
# undef MULTI_CPU
diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h
new file mode 100644
index 0000000..d4014fb
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb-3level.h
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/include/asm/hugetlb-3level.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_3LEVEL_H
+#define _ASM_ARM_HUGETLB_3LEVEL_H
+
+
+/*
+ * If our huge pte is non-zero then set the valid bit.
+ * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero
+ * ptes.
+ * (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes).
+ */
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ pte_t retval = *ptep;
+ if (pte_val(retval))
+ pte_val(retval) |= L_PTE_VALID;
+ return retval;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
new file mode 100644
index 0000000..1f1b1cd
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb.h
@@ -0,0 +1,84 @@
+/*
+ * arch/arm/include/asm/hugetlb.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_H
+#define _ASM_ARM_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+#include <asm/hugetlb-3level.h>
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor,
+ unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ struct hstate *h = hstate_file(file);
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+
+#endif /* _ASM_ARM_HUGETLB_H */
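A worked example of the alignment check in prepare_hugepage_range(), assuming the 2 MiB huge pages established by the LPAE definitions elsewhere in this series (HPAGE_SHIFT == PMD_SHIFT == 21):

/*
 *	huge_page_mask(h) == ~(2 MiB - 1) == 0xffe00000
 *
 *	addr = 0x40000000, len = 0x00400000  ->  both 2 MiB aligned: OK
 *	addr = 0x40100000                    ->  addr & ~mask != 0: -EINVAL
 */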
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 1e6cca5..3b763d6 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -8,6 +8,16 @@
/*
* CPU interrupt mask handling.
*/
+#ifdef CONFIG_CPU_V7M
+#define IRQMASK_REG_NAME_R "primask"
+#define IRQMASK_REG_NAME_W "primask"
+#define IRQMASK_I_BIT 1
+#else
+#define IRQMASK_REG_NAME_R "cpsr"
+#define IRQMASK_REG_NAME_W "cpsr_c"
+#define IRQMASK_I_BIT PSR_I_BIT
+#endif
+
#if __LINUX_ARM_ARCH__ >= 6
static inline unsigned long arch_local_irq_save(void)
@@ -15,7 +25,7 @@ static inline unsigned long arch_local_irq_save(void)
unsigned long flags;
asm volatile(
- " mrs %0, cpsr @ arch_local_irq_save\n"
+ " mrs %0, " IRQMASK_REG_NAME_R " @ arch_local_irq_save\n"
" cpsid i"
: "=r" (flags) : : "memory", "cc");
return flags;
@@ -129,7 +139,7 @@ static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
asm volatile(
- " mrs %0, cpsr @ local_save_flags"
+ " mrs %0, " IRQMASK_REG_NAME_R " @ local_save_flags"
: "=r" (flags) : : "memory", "cc");
return flags;
}
@@ -140,7 +150,7 @@ static inline unsigned long arch_local_save_flags(void)
static inline void arch_local_irq_restore(unsigned long flags)
{
asm volatile(
- " msr cpsr_c, %0 @ local_irq_restore"
+ " msr " IRQMASK_REG_NAME_W ", %0 @ local_irq_restore"
:
: "r" (flags)
: "memory", "cc");
@@ -148,8 +158,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
- return flags & PSR_I_BIT;
+ return flags & IRQMASK_I_BIT;
}
-#endif
-#endif
+#endif /* ifdef __KERNEL__ */
+#endif /* ifndef __ASM_ARM_IRQFLAGS_H */
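Because IRQMASK_REG_NAME_R/_W are string literals pasted into the asm templates during preprocessing, the unchanged C bodies now emit either flavour:

/*
 *	classic ARM:	mrs %0, cpsr	...	msr cpsr_c, %0
 *	v7-M:		mrs %0, primask	...	msr primask, %0
 *
 * On v7-M, PRIMASK bit 0 set means IRQs are masked, hence
 * IRQMASK_I_BIT == 1 rather than PSR_I_BIT.
 */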
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 308ad7d..75bf079 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -8,6 +8,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/types.h>
+
#ifndef __ASSEMBLY__
struct tag;
@@ -16,8 +18,10 @@ struct pt_regs;
struct smp_operations;
#ifdef CONFIG_SMP
#define smp_ops(ops) (&(ops))
+#define smp_init_ops(ops) (&(ops))
#else
#define smp_ops(ops) (struct smp_operations *)NULL
+#define smp_init_ops(ops) (bool (*)(void))NULL
#endif
struct machine_desc {
@@ -41,6 +45,7 @@ struct machine_desc {
unsigned char reserve_lp2 :1; /* never has lp2 */
char restart_mode; /* default restart mode */
struct smp_operations *smp; /* SMP operations */
+ bool (*smp_init)(void);
void (*fixup)(struct tag *, char **,
struct meminfo *);
void (*reserve)(void);/* reserve mem blocks */
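A usage sketch of the new hook (board and function names hypothetical, other machine_desc fields omitted): returning true from smp_init tells the core that the hook has installed its own SMP operations, so machine_desc->smp is skipped.

MACHINE_START(MYBOARD, "My board")
	/* ... */
	.smp		= smp_ops(myboard_smp_ops),
	.smp_init	= smp_init_ops(myboard_smp_init),
MACHINE_END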
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 57870ab..584786f 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -18,6 +18,8 @@
#include <linux/types.h>
#include <linux/sizes.h>
+#include <asm/cache.h>
+
#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
@@ -141,6 +143,20 @@
#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+/*
+ * Minimum guaranteed alignment in pgd_alloc(). The page table pointers passed
+ * around in head.S and proc-*.S are shifted by this amount, in order to
+ * leave spare high bits for systems with physical address extension. This
+ * does not fully accommodate the 40-bit addressing capability of ARM LPAE,
+ * but gives us about 38 bits.
+ */
+#ifdef CONFIG_ARM_LPAE
+#define ARCH_PGD_SHIFT L1_CACHE_SHIFT
+#else
+#define ARCH_PGD_SHIFT 0
+#endif
+#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
+
#ifndef __ASSEMBLY__
/*
@@ -207,7 +223,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
* direct-mapped view. We assume this is the first page
* of RAM in the mem_map as well.
*/
-#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
/*
* These are *only* valid on the kernel direct mapped RAM memory.
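A worked example of the pgd-pointer packing, assuming 64-byte cache lines (L1_CACHE_SHIFT == 6): pgd_alloc()'s alignment guarantee makes the low six bits of the physical pgd address zero, so shifting them out lets a 32-bit register carry 32 + 6 = 38 physical address bits.

/*
 *	pack:	reg  = (u32)(phys_pgd >> ARCH_PGD_SHIFT);
 *	unpack:	phys = (u64)reg << ARCH_PGD_SHIFT;
 */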
diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
new file mode 100644
index 0000000..c3247cc
--- /dev/null
+++ b/arch/arm/include/asm/mpu.h
@@ -0,0 +1,76 @@
+#ifndef __ARM_MPU_H
+#define __ARM_MPU_H
+
+#ifdef CONFIG_ARM_MPU
+
+/* MPUIR layout */
+#define MPUIR_nU 1
+#define MPUIR_DREGION 8
+#define MPUIR_IREGION 16
+#define MPUIR_DREGION_SZMASK (0xFF << MPUIR_DREGION)
+#define MPUIR_IREGION_SZMASK (0xFF << MPUIR_IREGION)
+
+/* ID_MMFR0 data relevant to MPU */
+#define MMFR0_PMSA (0xF << 4)
+#define MMFR0_PMSAv7 (3 << 4)
+
+/* MPU D/I Size Register fields */
+#define MPU_RSR_SZ 1
+#define MPU_RSR_EN 0
+
+/* The D/I RSR value for an enabled region spanning the whole of memory */
+#define MPU_RSR_ALL_MEM 63
+
+/* Individual bits in the DR/IR ACR */
+#define MPU_ACR_XN (1 << 12)
+#define MPU_ACR_SHARED (1 << 2)
+
+/* C, B and TEX[2:0] bits only have semantic meanings when grouped */
+#define MPU_RGN_CACHEABLE 0xB
+#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
+#define MPU_RGN_STRONGLY_ORDERED 0
+
+/* Main region should only be shared for SMP */
+#ifdef CONFIG_SMP
+#define MPU_RGN_NORMAL (MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
+#else
+#define MPU_RGN_NORMAL MPU_RGN_CACHEABLE
+#endif
+
+/* Access permission bits of ACR (only define those that we use) */
+#define MPU_AP_PL1RW_PL0RW (0x3 << 8)
+#define MPU_AP_PL1RW_PL0R0 (0x2 << 8)
+#define MPU_AP_PL1RW_PL0NA (0x1 << 8)
+
+/* For minimal static MPU region configurations */
+#define MPU_PROBE_REGION 0
+#define MPU_BG_REGION 1
+#define MPU_RAM_REGION 2
+#define MPU_VECTORS_REGION 3
+
+/* Maximum number of regions Linux is interested in */
+#define MPU_MAX_REGIONS 16
+
+#define MPU_DATA_SIDE 0
+#define MPU_INSTR_SIDE 1
+
+#ifndef __ASSEMBLY__
+
+struct mpu_rgn {
+ /* Assume same attributes for d/i-side */
+ u32 drbar;
+ u32 drsr;
+ u32 dracr;
+};
+
+struct mpu_rgn_info {
+ u32 mpuir;
+ struct mpu_rgn rgns[MPU_MAX_REGIONS];
+};
+extern struct mpu_rgn_info mpu_rgn_info;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_ARM_MPU */
+
+#endif
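A sketch of a minimal static region description built from the fields above (values illustrative, not from the patch): a normal-memory RAM region at address 0, readable and writable from both privilege levels, spanning all of memory.

static struct mpu_rgn example_ram_rgn = {
	.drbar	= 0,			/* region base address */
	.drsr	= MPU_RSR_ALL_MEM,	/* 4 GiB size with EN set */
	.dracr	= MPU_RGN_NORMAL | MPU_AP_PL1RW_PL0RW,
};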
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 812a494..6363f3d 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -13,7 +13,7 @@
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 18f5cef..626989f 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -30,6 +30,7 @@
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
+#define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1)
#define PMD_BIT4 (_AT(pmdval_t, 0))
#define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
#define PMD_APTABLE_SHIFT (61)
@@ -41,6 +42,8 @@
*/
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
@@ -66,6 +69,7 @@
#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0)
#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
+#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
@@ -79,4 +83,24 @@
#define PHYS_MASK_SHIFT (40)
#define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1)
+/*
+ * TTBR0/TTBR1 split (PAGE_OFFSET):
+ * 0x40000000: T0SZ = 2, T1SZ = 0 (not used)
+ * 0x80000000: T0SZ = 0, T1SZ = 1
+ * 0xc0000000: T0SZ = 0, T1SZ = 2
+ *
+ * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
+ * booting secondary CPUs would end up using TTBR1 for the identity
+ * mapping set up in TTBR0.
+ */
+#if defined CONFIG_VMSPLIT_2G
+#define TTBR1_OFFSET 16 /* skip two L1 entries */
+#elif defined CONFIG_VMSPLIT_3G
+#define TTBR1_OFFSET (4096 * (1 + 3)) /* only L2, skip pgd + 3*pmd */
+#else
+#define TTBR1_OFFSET 0
+#endif
+
+#define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16)
+
#endif
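A worked example for the default 3G/1G split (PAGE_OFFSET == 0xc0000000):

/*
 *	TTBR1_SIZE = ((0xc0000000 >> 30) - 1) << 16 = 2 << 16
 *
 * i.e. T1SZ = 2 in the TTBCR, so TTBR1 translates only the top 1 GiB,
 * and TTBR1_OFFSET (pgd + 3 pmd pages) skips the table space that
 * TTBR0 already covers.
 */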
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 86b8fe3..5689c18 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -33,7 +33,7 @@
#define PTRS_PER_PMD 512
#define PTRS_PER_PGD 4
-#define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
+#define PTE_HWTABLE_PTRS (0)
#define PTE_HWTABLE_OFF (0)
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
@@ -48,20 +48,28 @@
#define PMD_SHIFT 21
#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
+#define PMD_MASK (~((1 << PMD_SHIFT) - 1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PGDIR_MASK (~((1 << PGDIR_SHIFT) - 1))
/*
* section address mask and size definitions.
*/
#define SECTION_SHIFT 21
#define SECTION_SIZE (1UL << SECTION_SHIFT)
-#define SECTION_MASK (~(SECTION_SIZE-1))
+#define SECTION_MASK (~((1 << SECTION_SHIFT) - 1))
#define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE)
/*
+ * Hugetlb definitions.
+ */
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+/*
* "Linux" PTE definitions for LPAE.
*
* These bits overlap with the hardware bits but the naming is preserved for
@@ -79,6 +87,11 @@
#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
+#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
+#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
+#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
+#define PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
+
/*
* To be used in assembly code with the upper page attributes.
*/
@@ -166,8 +179,83 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
clean_pmd_entry(pmdp); \
} while (0)
+/*
+ * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
+ * that are written to a page table but not for ptes created with mk_pte.
+ *
+ * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
+ * hugetlb_cow, where it is compared with an entry in a page table.
+ * This comparison erroneously fails, ultimately leading to a memory leak.
+ *
+ * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
+ * present before running the comparison.
+ */
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(pte_a,pte_b) ((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG \
+ : pte_val(pte_a)) \
+ == (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG \
+ : pte_val(pte_b)))
+
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
+#define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
+#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+
+#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF)
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
+#endif
+
+#define PMD_BIT_FUNC(fn,op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
+PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
+
+#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+/* Represent a not-present pmd by zero; this is used by pmdp_invalidate */
+#define pmd_mknotpresent(pmd) (__pmd(0))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY |
+ PMD_SECT_VALID | PMD_SECT_NONE;
+ pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+ return pmd;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ BUG_ON(addr >= TASK_SIZE);
+
+ /* create a faulting entry if PROT_NONE protected */
+ if (pmd_val(pmd) & PMD_SECT_NONE)
+ pmd_val(pmd) &= ~PMD_SECT_VALID;
+
+ *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
+ flush_pmd_entry(pmdp);
+}
+
+static inline int has_transparent_hugepage(void)
+{
+ return 1;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_3LEVEL_H */
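A usage sketch of the new huge-pmd helpers (make_ro_huge_pmd is a hypothetical wrapper): clearing PMD_TABLE_BIT turns the entry into a 2 MiB section mapping, which is exactly the property pmd_trans_huge() tests.

static inline pmd_t make_ro_huge_pmd(struct page *page, pgprot_t prot)
{
	pmd_t pmd = pmd_mkhuge(mk_pmd(page, prot));	/* section, not table */

	return pmd_wrprotect(pmd);	/* sets PMD_SECT_RDONLY */
}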
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9bcd262..eaedce7 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -24,6 +24,9 @@
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
+
+#include <asm/tlbflush.h>
+
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index f3628fb..5324c11 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -60,7 +60,7 @@ extern struct processor {
/*
* Set the page table
*/
- void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
+ void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
/*
* Set a possibly extended PTE. Non-extended PTEs should
* ignore 'ext'.
@@ -82,7 +82,7 @@ extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
-extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
#ifdef CONFIG_ARM_LPAE
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
#else
@@ -116,13 +116,25 @@ extern void cpu_resume(void);
#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
#ifdef CONFIG_ARM_LPAE
+
+#define cpu_get_ttbr(nr) \
+ ({ \
+ u64 ttbr; \
+ __asm__("mrrc p15, " #nr ", %Q0, %R0, c2" \
+ : "=r" (ttbr)); \
+ ttbr; \
+ })
+
+#define cpu_set_ttbr(nr, val) \
+ do { \
+ u64 ttbr = val; \
+ __asm__("mcrr p15, " #nr ", %Q0, %R0, c2" \
+ : : "r" (ttbr)); \
+ } while (0)
+
#define cpu_get_pgd() \
({ \
- unsigned long pg, pg2; \
- __asm__("mrrc p15, 0, %0, %1, c2" \
- : "=r" (pg), "=r" (pg2) \
- : \
- : "cc"); \
+ u64 pg = cpu_get_ttbr(0); \
pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \
(pgd_t *)phys_to_virt(pg); \
})
@@ -137,6 +149,10 @@ extern void cpu_resume(void);
})
#endif
+#else /* !CONFIG_MMU */
+
+#define cpu_switch_mm(pgd,mm) { }
+
#endif
#endif /* __ASSEMBLY__ */
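A usage sketch of the 64-bit TTBR accessors (example_load_ttbr1 is hypothetical): the #nr stringification becomes the CP15 opc1 operand, so cpu_set_ttbr(1, ...) assembles to mcrr p15, 1, <lo>, <hi>, c2.

static inline void example_load_ttbr1(pgd_t *pgd)
{
	cpu_set_ttbr(1, virt_to_phys(pgd));
}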
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index ce0dbe7..c4ae171 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -32,5 +32,14 @@ struct psci_operations {
};
extern struct psci_operations psci_ops;
+extern struct smp_operations psci_smp_ops;
+
+#ifdef CONFIG_ARM_PSCI
+void psci_init(void);
+bool psci_smp_available(void);
+#else
+static inline void psci_init(void) { }
+static inline bool psci_smp_available(void) { return false; }
+#endif
#endif /* __ASM_ARM_PSCI_H */
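A sketch of the intended call sequence (the function is hypothetical; smp_set_ops() is the existing helper from asm/smp.h): probe for PSCI firmware early, then prefer its SMP operations when available. On !CONFIG_ARM_PSCI builds both calls compile away via the stubs above.

static void __init example_setup_smp(void)
{
	psci_init();
	if (psci_smp_available())
		smp_set_ops(&psci_smp_ops);
}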
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 3d52ee1..04c99f3 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -45,6 +45,7 @@ struct pt_regs {
*/
static inline int valid_user_regs(struct pt_regs *regs)
{
+#ifndef CONFIG_CPU_V7M
unsigned long mode = regs->ARM_cpsr & MODE_MASK;
/*
@@ -67,6 +68,9 @@ static inline int valid_user_regs(struct pt_regs *regs)
regs->ARM_cpsr |= USR_MODE;
return 0;
+#else /* ifndef CONFIG_CPU_V7M */
+ return 1;
+#endif
}
static inline long regs_return_value(struct pt_regs *regs)
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index d3a22be..a8cae71c 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -65,7 +65,10 @@ asmlinkage void secondary_start_kernel(void);
* Initial data for bringing up a secondary CPU.
*/
struct secondary_data {
- unsigned long pgdir;
+ union {
+ unsigned long mpu_rgn_szr;
+ unsigned long pgdir;
+ };
unsigned long swapper_pg_dir;
void *stack;
};
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index e789832..6462a72 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -26,6 +26,9 @@ static inline bool is_smp(void)
}
/* all SMP configurations have the extended CPUID registers */
+#ifndef CONFIG_MMU
+#define tlb_ops_need_broadcast() 0
+#else
static inline int tlb_ops_need_broadcast(void)
{
if (!is_smp())
@@ -33,6 +36,7 @@ static inline int tlb_ops_need_broadcast(void)
return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
}
+#endif
#if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7
#define cache_ops_need_broadcast() 0
@@ -66,4 +70,22 @@ static inline int get_logical_index(u32 mpidr)
return -EINVAL;
}
+/*
+ * NOTE! Assembly code relies on the following structure memory layout
+ * so that it can carry out a load-multiple from the structure's base
+ * address. For more information see arch/arm/kernel/sleep.S.
+ */
+struct mpidr_hash {
+ u32 mask; /* used by sleep.S */
+ u32 shift_aff[3]; /* used by sleep.S */
+ u32 bits;
+};
+
+extern struct mpidr_hash mpidr_hash;
+
+static inline u32 mpidr_hash_size(void)
+{
+ return 1 << mpidr_hash.bits;
+}
#endif
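A sizing sketch (illustrative only, assumes linux/slab.h): consumers allocate per-MPIDR context arrays over the hash space rather than over raw MPIDR values, which can be sparse.

/*	ctx = kcalloc(mpidr_hash_size(), sizeof(*ctx), GFP_KERNEL);	*/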
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
index 1c0a551..cd20029 100644
--- a/arch/arm/include/asm/suspend.h
+++ b/arch/arm/include/asm/suspend.h
@@ -1,6 +1,11 @@
#ifndef __ASM_ARM_SUSPEND_H
#define __ASM_ARM_SUSPEND_H
+struct sleep_save_sp {
+ u32 *save_ptr_stash;
+ u32 save_ptr_stash_phys;
+};
+
extern void cpu_resume(void);
extern int cpu_suspend(unsigned long, int (*)(unsigned long));
diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h
index dfd386d..720ea03 100644
--- a/arch/arm/include/asm/system_info.h
+++ b/arch/arm/include/asm/system_info.h
@@ -11,6 +11,7 @@
#define CPU_ARCH_ARMv5TEJ 7
#define CPU_ARCH_ARMv6 8
#define CPU_ARCH_ARMv7 9
+#define CPU_ARCH_ARMv7M 10
#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index bdf2b84..46e7cfb 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -204,6 +204,12 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
#endif
}
+static inline void
+tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
+{
+ tlb_add_flush(tlb, addr);
+}
+
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index a3625d1..fdbb9e3 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -535,8 +535,33 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
}
#endif
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
#endif
-#endif /* CONFIG_MMU */
+#elif defined(CONFIG_SMP) /* !CONFIG_MMU */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/mm_types.h>
+
+static inline void local_flush_tlb_all(void) { }
+static inline void local_flush_tlb_mm(struct mm_struct *mm) { }
+static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { }
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr) { }
+static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { }
+static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { }
+static inline void local_flush_bp_all(void) { }
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
+extern void flush_tlb_kernel_page(unsigned long kaddr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
+#endif /* __ASSEMBLY__ */
+
+#endif
#endif
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
new file mode 100644
index 0000000..fa88d09
--- /dev/null
+++ b/arch/arm/include/asm/v7m.h
@@ -0,0 +1,44 @@
+/*
+ * Common defines for v7m cpus
+ */
+#define V7M_SCS_ICTR IOMEM(0xe000e004)
+#define V7M_SCS_ICTR_INTLINESNUM_MASK 0x0000000f
+
+#define BASEADDR_V7M_SCB IOMEM(0xe000ed00)
+
+#define V7M_SCB_CPUID 0x00
+
+#define V7M_SCB_ICSR 0x04
+#define V7M_SCB_ICSR_PENDSVSET (1 << 28)
+#define V7M_SCB_ICSR_PENDSVCLR (1 << 27)
+#define V7M_SCB_ICSR_RETTOBASE (1 << 11)
+
+#define V7M_SCB_VTOR 0x08
+
+#define V7M_SCB_SCR 0x10
+#define V7M_SCB_SCR_SLEEPDEEP (1 << 2)
+
+#define V7M_SCB_CCR 0x14
+#define V7M_SCB_CCR_STKALIGN (1 << 9)
+
+#define V7M_SCB_SHPR2 0x1c
+#define V7M_SCB_SHPR3 0x20
+
+#define V7M_SCB_SHCSR 0x24
+#define V7M_SCB_SHCSR_USGFAULTENA (1 << 18)
+#define V7M_SCB_SHCSR_BUSFAULTENA (1 << 17)
+#define V7M_SCB_SHCSR_MEMFAULTENA (1 << 16)
+
+#define V7M_xPSR_FRAMEPTRALIGN 0x00000200
+#define V7M_xPSR_EXCEPTIONNO 0x000001ff
+
+/*
+ * When branching to an address that has bits [31:28] == 0xf an exception return
+ * occurs. Bits [27:5] are reserved (SBOP). If the processor implements the FP
+ * extension Bit [4] defines if the exception frame has space allocated for FP
+ * state information, SBOP otherwise. Bit [3] defines the mode that is returned
+ * to (0 -> handler mode; 1 -> thread mode). Bit [2] defines which sp is used
+ * (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
+ */
+#define EXC_RET_STACK_MASK 0x00000004
+#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
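A decoding sketch for EXC_RETURN values (v7m_exc_ret_used_psp is hypothetical): bit 2 records which stack pointer the exception frame was pushed to, which is the property EXC_RET_STACK_MASK captures.

static inline int v7m_exc_ret_used_psp(unsigned long exc_ret)
{
	return !!(exc_ret & EXC_RET_STACK_MASK);	/* 1: PSP, 0: MSP */
}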