author		Linus Torvalds <torvalds@linux-foundation.org>	2018-06-07 10:23:33 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-06-07 10:23:33 -0700
commit		c90fca951e90ba470a3dc6087667edffcf8db21b (patch)
tree		b131279a9290826e78884ee706cc0a4f1d82be4e /arch/powerpc/include/asm
parent		c0ab85267e25e34ce8b7e4429f0ef01fa0795b80 (diff)
parent		ff5bc793e47b537bf3e904fada585e102c54dd8b (diff)
Merge tag 'powerpc-4.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "Notable changes:

   - Support for split PMD page table lock on 64-bit Book3S (Power8/9).

   - Add support for HAVE_RELIABLE_STACKTRACE, so we properly support
     live patching again.

   - Add support for patching barrier_nospec in copy_from_user() and
     syscall entry.

   - A couple of fixes for our data breakpoints on Book3S.

   - A series from Nick optimising TLB/mm handling with the Radix MMU.

   - Numerous small cleanups to squash sparse/gcc warnings from Mathieu
     Malaterre.

   - Several series optimising various parts of the 32-bit code from
     Christophe Leroy.

   - Removal of support for two old machines, "SBC834xE" and "C2K"
     ("GEFanuc,C2K"), which is why the diffstat has so many deletions.

  And many other small improvements & fixes.

  There's a few out-of-area changes. Some minor ftrace changes OK'ed by
  Steve, and a fix to our powernv cpuidle driver. Then there's a series
  touching mm, x86 and fs/proc/task_mmu.c, which cleans up some details
  around pkey support. It was ack'ed/reviewed by Ingo & Dave and has
  been in next for several weeks.

  Thanks to: Akshay Adiga, Alastair D'Silva, Alexey Kardashevskiy, Al
  Viro, Andrew Donnellan, Aneesh Kumar K.V, Anju T Sudhakar, Arnd
  Bergmann, Balbir Singh, Cédric Le Goater, Christophe Leroy, Christophe
  Lombard, Colin Ian King, Dave Hansen, Fabio Estevam, Finn Thain,
  Frederic Barrat, Gautham R. Shenoy, Haren Myneni, Hari Bathini, Ingo
  Molnar, Jonathan Neuschäfer, Josh Poimboeuf, Kamalesh Babulal,
  Madhavan Srinivasan, Mahesh Salgaonkar, Mark Greer, Mathieu Malaterre,
  Matthew Wilcox, Michael Neuling, Michal Suchanek, Naveen N. Rao,
  Nicholas Piggin, Nicolai Stange, Olof Johansson, Paul Gortmaker, Paul
  Mackerras, Peter Rosin, Pridhiviraj Paidipeddi, Ram Pai, Rashmica
  Gupta, Ravi Bangoria, Russell Currey, Sam Bobroff, Samuel
  Mendoza-Jonas, Segher Boessenkool, Shilpasri G Bhat, Simon Guo,
  Souptick Joarder, Stewart Smith, Thiago Jung Bauermann, Torsten Duwe,
  Vaibhav Jain, Wei Yongjun, Wolfram Sang, Yisheng Xie, YueHaibing"

* tag 'powerpc-4.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (251 commits)
  powerpc/64s/radix: Fix missing ptesync in flush_cache_vmap
  cpuidle: powernv: Fix promotion from snooze if next state disabled
  powerpc: fix build failure by disabling attribute-alias warning in pci_32
  ocxl: Fix missing unlock on error in afu_ioctl_enable_p9_wait()
  powerpc-opal: fix spelling mistake "Uniterrupted" -> "Uninterrupted"
  powerpc: fix spelling mistake: "Usupported" -> "Unsupported"
  powerpc/pkeys: Detach execute_only key on !PROT_EXEC
  powerpc/powernv: copy/paste - Mask SO bit in CR
  powerpc: Remove core support for Marvell mv64x60 hostbridges
  powerpc/boot: Remove core support for Marvell mv64x60 hostbridges
  powerpc/boot: Remove support for Marvell mv64x60 i2c controller
  powerpc/boot: Remove support for Marvell MPSC serial controller
  powerpc/embedded6xx: Remove C2K board support
  powerpc/lib: optimise PPC32 memcmp
  powerpc/lib: optimise 32 bits __clear_user()
  powerpc/time: inline arch_vtime_task_switch()
  powerpc/Makefile: set -mcpu=860 flag for the 8xx
  powerpc: Implement csum_ipv6_magic in assembly
  powerpc/32: Optimise __csum_partial()
  powerpc/lib: Adjust .balign inside string functions for PPC32
  ...
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--   arch/powerpc/include/asm/asm-prototypes.h   20
-rw-r--r--   arch/powerpc/include/asm/barrier.h   15
-rw-r--r--   arch/powerpc/include/asm/book3s/32/pgalloc.h   1
-rw-r--r--   arch/powerpc/include/asm/book3s/32/pgtable.h   7
-rw-r--r--   arch/powerpc/include/asm/book3s/64/hash-4k.h   8
-rw-r--r--   arch/powerpc/include/asm/book3s/64/hash-64k.h   7
-rw-r--r--   arch/powerpc/include/asm/book3s/64/hash.h   10
-rw-r--r--   arch/powerpc/include/asm/book3s/64/mmu.h   7
-rw-r--r--   arch/powerpc/include/asm/book3s/64/pgalloc.h   46
-rw-r--r--   arch/powerpc/include/asm/book3s/64/pgtable.h   28
-rw-r--r--   arch/powerpc/include/asm/book3s/64/radix-4k.h   3
-rw-r--r--   arch/powerpc/include/asm/book3s/64/radix-64k.h   4
-rw-r--r--   arch/powerpc/include/asm/book3s/64/radix.h   86
-rw-r--r--   arch/powerpc/include/asm/book3s/64/tlbflush-radix.h   7
-rw-r--r--   arch/powerpc/include/asm/book3s/64/tlbflush.h   12
-rw-r--r--   arch/powerpc/include/asm/cache.h   3
-rw-r--r--   arch/powerpc/include/asm/cacheflush.h   14
-rw-r--r--   arch/powerpc/include/asm/checksum.h   15
-rw-r--r--   arch/powerpc/include/asm/cputable.h   3
-rw-r--r--   arch/powerpc/include/asm/cputime.h   16
-rw-r--r--   arch/powerpc/include/asm/eeh.h   11
-rw-r--r--   arch/powerpc/include/asm/feature-fixups.h   9
-rw-r--r--   arch/powerpc/include/asm/ftrace.h   31
-rw-r--r--   arch/powerpc/include/asm/hardirq.h   1
-rw-r--r--   arch/powerpc/include/asm/hugetlb.h   21
-rw-r--r--   arch/powerpc/include/asm/hw_irq.h   11
-rw-r--r--   arch/powerpc/include/asm/imc-pmu.h   1
-rw-r--r--   arch/powerpc/include/asm/io.h   10
-rw-r--r--   arch/powerpc/include/asm/machdep.h   2
-rw-r--r--   arch/powerpc/include/asm/mmu-book3e.h   6
-rw-r--r--   arch/powerpc/include/asm/mmu_context.h   5
-rw-r--r--   arch/powerpc/include/asm/module.h   10
-rw-r--r--   arch/powerpc/include/asm/mpc52xx.h   6
-rw-r--r--   arch/powerpc/include/asm/nmi.h   6
-rw-r--r--   arch/powerpc/include/asm/nohash/32/pgalloc.h   1
-rw-r--r--   arch/powerpc/include/asm/nohash/32/pgtable.h   41
-rw-r--r--   arch/powerpc/include/asm/nohash/32/pte-40x.h   3
-rw-r--r--   arch/powerpc/include/asm/nohash/64/pgalloc.h   95
-rw-r--r--   arch/powerpc/include/asm/nohash/64/pgtable-64k.h   57
-rw-r--r--   arch/powerpc/include/asm/nohash/64/pgtable.h   46
-rw-r--r--   arch/powerpc/include/asm/nohash/pgtable.h   58
-rw-r--r--   arch/powerpc/include/asm/nohash/pte-book3e.h   6
-rw-r--r--   arch/powerpc/include/asm/opal-api.h   8
-rw-r--r--   arch/powerpc/include/asm/opal.h   5
-rw-r--r--   arch/powerpc/include/asm/paca.h   3
-rw-r--r--   arch/powerpc/include/asm/page.h   1
-rw-r--r--   arch/powerpc/include/asm/pgtable.h   1
-rw-r--r--   arch/powerpc/include/asm/pkeys.h   13
-rw-r--r--   arch/powerpc/include/asm/plpar_wrappers.h   8
-rw-r--r--   arch/powerpc/include/asm/pmac_pfunc.h   1
-rw-r--r--   arch/powerpc/include/asm/pnv-ocxl.h   2
-rw-r--r--   arch/powerpc/include/asm/ppc-opcode.h   1
-rw-r--r--   arch/powerpc/include/asm/ppc_asm.h   6
-rw-r--r--   arch/powerpc/include/asm/processor.h   10
-rw-r--r--   arch/powerpc/include/asm/pte-common.h   8
-rw-r--r--   arch/powerpc/include/asm/reg.h   32
-rw-r--r--   arch/powerpc/include/asm/rheap.h   3
-rw-r--r--   arch/powerpc/include/asm/rtas.h   2
-rw-r--r--   arch/powerpc/include/asm/setup.h   9
-rw-r--r--   arch/powerpc/include/asm/smp.h   1
-rw-r--r--   arch/powerpc/include/asm/sstep.h   2
-rw-r--r--   arch/powerpc/include/asm/switch_to.h   1
-rw-r--r--   arch/powerpc/include/asm/syscalls.h   2
-rw-r--r--   arch/powerpc/include/asm/systbl.h   6
-rw-r--r--   arch/powerpc/include/asm/thread_info.h   8
-rw-r--r--   arch/powerpc/include/asm/time.h   11
-rw-r--r--   arch/powerpc/include/asm/tlb.h   13
-rw-r--r--   arch/powerpc/include/asm/tm.h   2
-rw-r--r--   arch/powerpc/include/asm/trace.h   7
-rw-r--r--   arch/powerpc/include/asm/uaccess.h   21
-rw-r--r--   arch/powerpc/include/asm/xive-regs.h   6
-rw-r--r--   arch/powerpc/include/asm/xmon.h   2
-rw-r--r--   arch/powerpc/include/asm/xor.h   12
-rw-r--r--   arch/powerpc/include/asm/xor_altivec.h   19
74 files changed, 474 insertions, 501 deletions
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index d9713ad..aa9e785 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -36,8 +36,7 @@ void kexec_copy_flush(struct kimage *image);
/* pseries hcall tracing */
extern struct static_key hcall_tracepoint_key;
void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
-void __trace_hcall_exit(long opcode, unsigned long retval,
- unsigned long *retbuf);
+void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
/* OPAL tracing */
#ifdef HAVE_JUMP_LABEL
extern struct static_key opal_tracepoint_key;
@@ -81,18 +80,12 @@ void machine_check_exception(struct pt_regs *regs);
void emulation_assist_interrupt(struct pt_regs *regs);
/* signals, syscalls and interrupts */
-#ifdef CONFIG_PPC64
-int sys_swapcontext(struct ucontext __user *old_ctx,
- struct ucontext __user *new_ctx,
- long ctx_size, long r6, long r7, long r8, struct pt_regs *regs);
-#else
long sys_swapcontext(struct ucontext __user *old_ctx,
struct ucontext __user *new_ctx,
- int ctx_size, int r6, int r7, int r8, struct pt_regs *regs);
-int sys_debug_setcontext(struct ucontext __user *ctx,
- int ndbg, struct sig_dbg_op __user *dbg,
- int r6, int r7, int r8,
- struct pt_regs *regs);
+ long ctx_size);
+#ifdef CONFIG_PPC32
+long sys_debug_setcontext(struct ucontext __user *ctx,
+ int ndbg, struct sig_dbg_op __user *dbg);
int
ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp);
unsigned long __init early_init(unsigned long dt_ptr);
@@ -141,4 +134,7 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
void pnv_power9_force_smt4_catch(void);
void pnv_power9_force_smt4_release(void);
+void tm_enable(void);
+void tm_disable(void);
+void tm_abort(uint8_t cause);
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index c7c6395..f67b3f6 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -76,6 +76,21 @@ do { \
___p1; \
})
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * Prevent execution of subsequent instructions until preceding branches have
+ * been fully resolved and are no longer executing speculatively.
+ */
+#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; nop
+
+// This also acts as a compiler barrier due to the memory clobber.
+#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
+
+#else /* !CONFIG_PPC_BOOK3S_64 */
+#define barrier_nospec_asm
+#define barrier_nospec()
+#endif
+
#include <asm-generic/barrier.h>
#endif /* _ASM_POWERPC_BARRIER_H */
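The comment above describes what the barrier guarantees; for context, a minimal hypothetical example of the usage pattern it enables (the function and names below are illustrative, not from this patch): sanitising a user-controlled index so a mispredicted bounds check cannot feed a speculative out-of-bounds load.

/* Illustrative only: barrier_nospec() stops speculation past the check. */
static inline unsigned long lookup_sanitised(unsigned long *table,
					     unsigned long nr_entries,
					     unsigned long idx)
{
	if (idx >= nr_entries)
		return 0;
	barrier_nospec();	/* idx is now architecturally bounded */
	return table[idx];
}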
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 5073cc7..6a66739 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -99,6 +99,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
static inline void pgtable_free(void *table, unsigned index_size)
{
if (!index_size) {
+ pgtable_page_dtor(virt_to_page(table));
free_page((unsigned long)table);
} else {
BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index c615abd..02f5acd 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -235,15 +235,18 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
}
-static inline void __ptep_set_access_flags(struct mm_struct *mm,
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
pte_t *ptep, pte_t entry,
- unsigned long address)
+ unsigned long address,
+ int psize)
{
unsigned long set = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
unsigned long clr = ~pte_val(entry) & _PAGE_RO;
pte_update(ptep, clr, set);
+
+ flush_tlb_page(vma, address);
}
#define __HAVE_ARCH_PTE_SAME
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 4b54230..9a37986 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -38,8 +38,12 @@
#define H_PAGE_4K_PFN 0x0
#define H_PAGE_THP_HUGE 0x0
#define H_PAGE_COMBO 0x0
-#define H_PTE_FRAG_NR 0
-#define H_PTE_FRAG_SIZE_SHIFT 0
+
+/* 8 bytes per each pte entry */
+#define H_PTE_FRAG_SIZE_SHIFT (H_PTE_INDEX_SIZE + 3)
+#define H_PTE_FRAG_NR (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
+#define H_PMD_FRAG_SIZE_SHIFT (H_PMD_INDEX_SIZE + 3)
+#define H_PMD_FRAG_NR (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)
/* memory key bits, only 8 keys supported */
#define H_PTE_PKEY_BIT0 0
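For the arithmetic above: with 8-byte PTEs, a full PTE table occupies 1 << (H_PTE_INDEX_SIZE + 3) bytes, and H_PTE_FRAG_NR is how many such tables fit in one page. A worked sketch, assuming H_PTE_INDEX_SIZE is 9 and a 4K PAGE_SIZE (values here are an assumption for illustration, not taken from this hunk):

/* Fragment math sketch, assuming a 9-bit index and 4K pages. */
#define EX_PTE_INDEX_SIZE	9
#define EX_PAGE_SIZE		4096UL
#define EX_FRAG_SIZE_SHIFT	(EX_PTE_INDEX_SIZE + 3)			/* 12: 4096-byte table */
#define EX_FRAG_NR		(EX_PAGE_SIZE >> EX_FRAG_SIZE_SHIFT)	/* 1 fragment per page */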
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index cc82745..c81793d 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -46,6 +46,13 @@
#define H_PTE_FRAG_SIZE_SHIFT (H_PTE_INDEX_SIZE + 3 + 1)
#define H_PTE_FRAG_NR (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+#define H_PMD_FRAG_SIZE_SHIFT (H_PMD_INDEX_SIZE + 3 + 1)
+#else
+#define H_PMD_FRAG_SIZE_SHIFT (H_PMD_INDEX_SIZE + 3)
+#endif
+#define H_PMD_FRAG_NR (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)
+
#ifndef __ASSEMBLY__
#include <asm/errno.h>
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index cc8cd65..0387b15 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -23,16 +23,6 @@
H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE (ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
-#if (defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)) && \
- defined(CONFIG_PPC_64K_PAGES)
-/*
- * only with hash 64k we need to use the second half of pmd page table
- * to store pointer to deposited pgtable_t
- */
-#define H_PMD_CACHE_INDEX (H_PMD_INDEX_SIZE + 1)
-#else
-#define H_PMD_CACHE_INDEX H_PMD_INDEX_SIZE
-#endif
/*
* We store the slot details in the second half of page table.
* Increase the pud level table so that hugetlb ptes can be stored
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 5094696..9c8c669 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -134,10 +134,11 @@ typedef struct {
#ifdef CONFIG_PPC_SUBPAGE_PROT
struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
-#ifdef CONFIG_PPC_64K_PAGES
- /* for 4K PTE fragment support */
+ /*
+ * pagetable fragment support
+ */
void *pte_frag;
-#endif
+ void *pmd_frag;
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct list_head iommu_group_mem_list;
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 558a159..01ee40f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -42,7 +42,9 @@ extern struct kmem_cache *pgtable_cache[];
})
extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
+extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pte_fragment_free(unsigned long *, int);
+extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
@@ -88,8 +90,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
* need to do this for 4k.
*/
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
- ((H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX) || \
- (H_PGD_INDEX_SIZE == H_PMD_CACHE_INDEX))
+ (H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
memset(pgd, 0, PGD_TABLE_SIZE);
#endif
return pgd;
@@ -124,36 +125,35 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
}
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
- unsigned long address)
+ unsigned long address)
{
/*
* By now all the pud entries should be none entries. So go
* ahead and flush the page walk cache
*/
flush_tlb_pgtable(tlb, address);
- pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX);
+ pgtable_free_tlb(tlb, pud, PUD_INDEX);
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
- pgtable_gfp_flags(mm, GFP_KERNEL));
+ return pmd_fragment_alloc(mm, addr);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
- kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+ pmd_fragment_free((unsigned long *)pmd);
}
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
- unsigned long address)
+ unsigned long address)
{
/*
* By now all the pud entries should be none entries. So go
* ahead and flush the page walk cache
*/
flush_tlb_pgtable(tlb, address);
- return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
+ return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
@@ -173,31 +173,6 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
return (pgtable_t)pmd_page_vaddr(pmd);
}
-#ifdef CONFIG_PPC_4K_PAGES
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
-{
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
-{
- struct page *page;
- pte_t *pte;
-
- pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
- if (!pte)
- return NULL;
- page = virt_to_page(pte);
- if (!pgtable_page_ctor(page)) {
- __free_page(page);
- return NULL;
- }
- return pte;
-}
-#else /* if CONFIG_PPC_64K_PAGES */
-
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
@@ -209,7 +184,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
{
return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}
-#endif
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
@@ -229,7 +203,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
* ahead and flush the page walk cache
*/
flush_tlb_pgtable(tlb, address);
- pgtable_free_tlb(tlb, table, 0);
+ pgtable_free_tlb(tlb, table, PTE_INDEX);
}
#define check_pgt_cache() do { } while (0)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 47b5ffc..42fe7c2 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -212,13 +212,13 @@ extern unsigned long __pte_index_size;
extern unsigned long __pmd_index_size;
extern unsigned long __pud_index_size;
extern unsigned long __pgd_index_size;
-extern unsigned long __pmd_cache_index;
extern unsigned long __pud_cache_index;
#define PTE_INDEX_SIZE __pte_index_size
#define PMD_INDEX_SIZE __pmd_index_size
#define PUD_INDEX_SIZE __pud_index_size
#define PGD_INDEX_SIZE __pgd_index_size
-#define PMD_CACHE_INDEX __pmd_cache_index
+/* pmd table use page table fragments */
+#define PMD_CACHE_INDEX 0
#define PUD_CACHE_INDEX __pud_cache_index
/*
* Because of use of pte fragments and THP, size of page table
@@ -246,6 +246,12 @@ extern unsigned long __pte_frag_size_shift;
#define PTE_FRAG_SIZE_SHIFT __pte_frag_size_shift
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
+extern unsigned long __pmd_frag_nr;
+#define PMD_FRAG_NR __pmd_frag_nr
+extern unsigned long __pmd_frag_size_shift;
+#define PMD_FRAG_SIZE_SHIFT __pmd_frag_size_shift
+#define PMD_FRAG_SIZE (1UL << PMD_FRAG_SIZE_SHIFT)
+
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
@@ -273,6 +279,16 @@ extern unsigned long __pte_frag_size_shift;
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS 0xc0000000000000ffUL
+/*
+ * Used as an indicator for rcu callback functions
+ */
+enum pgtable_index {
+ PTE_INDEX = 0,
+ PMD_INDEX,
+ PUD_INDEX,
+ PGD_INDEX,
+};
+
extern unsigned long __vmalloc_start;
extern unsigned long __vmalloc_end;
#define VMALLOC_START __vmalloc_start
@@ -751,12 +767,14 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
* Generic functions with hash/radix callbacks
*/
-static inline void __ptep_set_access_flags(struct mm_struct *mm,
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
pte_t *ptep, pte_t entry,
- unsigned long address)
+ unsigned long address,
+ int psize)
{
if (radix_enabled())
- return radix__ptep_set_access_flags(mm, ptep, entry, address);
+ return radix__ptep_set_access_flags(vma, ptep, entry,
+ address, psize);
return hash__ptep_set_access_flags(ptep, entry);
}
diff --git a/arch/powerpc/include/asm/book3s/64/radix-4k.h b/arch/powerpc/include/asm/book3s/64/radix-4k.h
index ca366ec..863c3e8 100644
--- a/arch/powerpc/include/asm/book3s/64/radix-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/radix-4k.h
@@ -15,4 +15,7 @@
#define RADIX_PTE_FRAG_SIZE_SHIFT (RADIX_PTE_INDEX_SIZE + 3)
#define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT)
+#define RADIX_PMD_FRAG_SIZE_SHIFT (RADIX_PMD_INDEX_SIZE + 3)
+#define RADIX_PMD_FRAG_NR (PAGE_SIZE >> RADIX_PMD_FRAG_SIZE_SHIFT)
+
#endif /* _ASM_POWERPC_PGTABLE_RADIX_4K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/radix-64k.h b/arch/powerpc/include/asm/book3s/64/radix-64k.h
index 8300824..ccb78ca 100644
--- a/arch/powerpc/include/asm/book3s/64/radix-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/radix-64k.h
@@ -16,4 +16,8 @@
*/
#define RADIX_PTE_FRAG_SIZE_SHIFT (RADIX_PTE_INDEX_SIZE + 3)
#define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT)
+
+#define RADIX_PMD_FRAG_SIZE_SHIFT (RADIX_PMD_INDEX_SIZE + 3)
+#define RADIX_PMD_FRAG_NR (PAGE_SIZE >> RADIX_PMD_FRAG_SIZE_SHIFT)
+
#endif /* _ASM_POWERPC_PGTABLE_RADIX_64K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 705193e..ef9f967 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -124,23 +124,28 @@ extern void radix__mark_rodata_ro(void);
extern void radix__mark_initmem_nx(void);
#endif
+extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+ pte_t entry, unsigned long address,
+ int psize);
+
static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
unsigned long set)
{
- pte_t pte;
- unsigned long old_pte, new_pte;
-
- do {
- pte = READ_ONCE(*ptep);
- old_pte = pte_val(pte);
- new_pte = (old_pte | set) & ~clr;
-
- } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
-
- return old_pte;
+ __be64 old_be, tmp_be;
+
+ __asm__ __volatile__(
+ "1: ldarx %0,0,%3 # pte_update\n"
+ " andc %1,%0,%5 \n"
+ " or %1,%1,%4 \n"
+ " stdcx. %1,0,%3 \n"
+ " bne- 1b"
+ : "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
+ : "r" (ptep), "r" (cpu_to_be64(set)), "r" (cpu_to_be64(clr))
+ : "cc" );
+
+ return be64_to_cpu(old_be);
}
-
static inline unsigned long radix__pte_update(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, unsigned long clr,
@@ -176,48 +181,14 @@ static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long old_pte;
if (full) {
- /*
- * If we are trying to clear the pte, we can skip
- * the DD1 pte update sequence and batch the tlb flush. The
- * tlb flush batching is done by mmu gather code. We
- * still keep the cmp_xchg update to make sure we get
- * correct R/C bit which might be updated via Nest MMU.
- */
- old_pte = __radix_pte_update(ptep, ~0ul, 0);
+ old_pte = pte_val(*ptep);
+ *ptep = __pte(0);
} else
old_pte = radix__pte_update(mm, addr, ptep, ~0ul, 0, 0);
return __pte(old_pte);
}
-/*
- * Set the dirty and/or accessed bits atomically in a linux PTE, this
- * function doesn't need to invalidate tlb.
- */
-static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
- pte_t *ptep, pte_t entry,
- unsigned long address)
-{
-
- unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
- _PAGE_RW | _PAGE_EXEC);
-
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-
- unsigned long old_pte, new_pte;
-
- old_pte = __radix_pte_update(ptep, ~0, 0);
- /*
- * new value of pte
- */
- new_pte = old_pte | set;
- radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
- __radix_pte_update(ptep, 0, new_pte);
- } else
- __radix_pte_update(ptep, 0, set);
- asm volatile("ptesync" : : : "memory");
-}
-
static inline int radix__pte_same(pte_t pte_a, pte_t pte_b)
{
return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0);
@@ -232,7 +203,24 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, int percpu)
{
*ptep = pte;
- asm volatile("ptesync" : : : "memory");
+
+ /*
+ * The architecture suggests a ptesync after setting the pte, which
+ * orders the store that updates the pte with subsequent page table
+ * walk accesses which may load the pte. Without this it may be
+ * possible for a subsequent access to result in spurious fault.
+ *
+ * This is not necessary for correctness, because a spurious fault
+ * is tolerated by the page fault handler, and this store will
+ * eventually be seen. In testing, there was no noticable increase
+ * in user faults on POWER9. Avoiding ptesync here is a significant

+ * win for things like fork. If a future microarchitecture benefits
+ * from ptesync, it should probably go into update_mmu_cache, rather
+ * than set_pte_at (which is used to set ptes unrelated to faults).
+ *
+ * Spurious faults to vmalloc region are not tolerated, so there is
+ * a ptesync in flush_cache_vmap.
+ */
}
static inline int radix__pmd_bad(pmd_t pmd)
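The removed C loop and the new ldarx/stdcx. sequence above implement the same atomic read-modify-write on the PTE. A standalone userspace sketch of the pattern using GCC atomic builtins, mirroring the asm's (old & ~clr) | set ordering (illustrative only, not the kernel code):

#include <stdint.h>

/* Atomically compute (old & ~clr) | set on *pte and return the old value,
 * retrying if another updater wins the race, as ldarx/stdcx. does. */
static uint64_t pte_update_sketch(uint64_t *pte, uint64_t clr, uint64_t set)
{
	uint64_t old = __atomic_load_n(pte, __ATOMIC_RELAXED);

	while (!__atomic_compare_exchange_n(pte, &old, (old & ~clr) | set,
					    0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		;	/* on failure, 'old' is refreshed with the current value */
	return old;
}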
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 19b45ba..ef5c3f2 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -51,4 +51,11 @@ extern void radix__flush_tlb_all(void);
extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
unsigned long address);
+extern void radix__flush_tlb_lpid_page(unsigned int lpid,
+ unsigned long addr,
+ unsigned long page_size);
+extern void radix__flush_pwc_lpid(unsigned int lpid);
+extern void radix__local_flush_tlb_lpid(unsigned int lpid);
+extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid);
+
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 0cac172..ebf572e 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -4,7 +4,7 @@
#define MMU_NO_CONTEXT ~0UL
-
+#include <linux/mm_types.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>
@@ -137,6 +137,16 @@ static inline void flush_all_mm(struct mm_struct *mm)
#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)
#define flush_all_mm(mm) local_flush_all_mm(mm)
#endif /* CONFIG_SMP */
+
+#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
+static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
+ unsigned long address)
+{
+ /* See ptep_set_access_flags comment */
+ if (atomic_read(&vma->vm_mm->context.copros) > 0)
+ flush_tlb_page(vma, address);
+}
+
/*
* flush the page walk cache for the address
*/
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index c1d257a..6629846 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -9,11 +9,14 @@
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX)
#define L1_CACHE_SHIFT 4
#define MAX_COPY_PREFETCH 1
+#define IFETCH_ALIGN_SHIFT 2
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT 6
#define MAX_COPY_PREFETCH 4
+#define IFETCH_ALIGN_SHIFT 3
#elif defined(CONFIG_PPC32)
#define MAX_COPY_PREFETCH 4
+#define IFETCH_ALIGN_SHIFT 3 /* 603 fetches 2 insn at a time */
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT 7
#else
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 11843e3..0d72ec7 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -23,9 +23,21 @@
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_icache_page(vma, page) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * Book3s has no ptesync after setting a pte, so without this ptesync it's
+ * possible for a kernel virtual mapping access to return a spurious fault
+ * if it's accessed right after the pte is set. The page fault handler does
+ * not expect this type of fault. flush_cache_vmap is not exactly the right
+ * place to put this, but it seems to work well enough.
+ */
+#define flush_cache_vmap(start, end) do { asm volatile("ptesync" ::: "memory"); } while (0)
+#else
+#define flush_cache_vmap(start, end) do { } while (0)
+#endif
+
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 842124b..a78a57e 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -12,6 +12,8 @@
#ifdef CONFIG_GENERIC_CSUM
#include <asm-generic/checksum.h>
#else
+#include <linux/bitops.h>
+#include <linux/in6.h>
/*
* Computes the checksum of a memory block at src, length len,
* and adds in "sum" (32-bit), while copying the block to dst.
@@ -55,11 +57,7 @@ static inline __sum16 csum_fold(__wsum sum)
static inline u32 from64to32(u64 x)
{
- /* add up 32-bit and 32-bit for 32+c bit */
- x = (x & 0xffffffff) + (x >> 32);
- /* add up carry.. */
- x = (x & 0xffffffff) + (x >> 32);
- return (u32)x;
+ return (x + ror64(x, 32)) >> 32;
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
@@ -112,7 +110,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
#ifdef __powerpc64__
res += (__force u64)addend;
- return (__force __wsum) from64to32(res);
+ return (__force __wsum)((u32)res + (res >> 32));
#else
asm("addc %0,%0,%1;"
"addze %0,%0;"
@@ -214,6 +212,11 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
return csum_fold(csum_partial(buff, len, 0));
}
+#define _HAVE_ARCH_IPV6_CSUM
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __wsum sum);
+
#endif
#endif /* __KERNEL__ */
#endif
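The new from64to32() above folds the two 32-bit halves in one step: x + ror64(x, 32) adds the low half into the high half (and vice versa), so the end-around carry lands in the top 32 bits and a single shift finishes the fold. A standalone sketch of the same computation, with ror64 open-coded for illustration:

#include <stdint.h>

static uint32_t from64to32_sketch(uint64_t x)
{
	uint64_t r = (x >> 32) | (x << 32);	/* ror64(x, 32) */

	/* top 32 bits of x + r hold (high + low) with end-around carry */
	return (uint32_t)((x + r) >> 32);
}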
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 66fcab1..9c0a308 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -215,6 +215,7 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000)
#define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000)
#define CPU_FTR_P9_TLBIE_BUG LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_P9_TIDR LONG_ASM_CONST(0x0000800000000000)
#ifndef __ASSEMBLY__
@@ -462,7 +463,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
- CPU_FTR_P9_TLBIE_BUG)
+ CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
#define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
(~CPU_FTR_SAO))
#define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 99b5418..bc4903b 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -47,9 +47,23 @@ static inline unsigned long cputime_to_usecs(const cputime_t ct)
* has to be populated in the new task
*/
#ifdef CONFIG_PPC64
+#define get_accounting(tsk) (&get_paca()->accounting)
static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
#else
-void arch_vtime_task_switch(struct task_struct *tsk);
+#define get_accounting(tsk) (&task_thread_info(tsk)->accounting)
+/*
+ * Called from the context switch with interrupts disabled, to charge all
+ * accumulated times to the current process, and to prepare accounting on
+ * the next process.
+ */
+static inline void arch_vtime_task_switch(struct task_struct *prev)
+{
+ struct cpu_accounting_data *acct = get_accounting(current);
+ struct cpu_accounting_data *acct0 = get_accounting(prev);
+
+ acct->starttime = acct0->starttime;
+ acct->startspurr = acct0->startspurr;
+}
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index c2266ca..677102b 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -106,6 +106,9 @@ struct eeh_pe {
#define eeh_pe_for_each_dev(pe, edev, tmp) \
list_for_each_entry_safe(edev, tmp, &pe->edevs, list)
+#define eeh_for_each_pe(root, pe) \
+ for (pe = root; pe; pe = eeh_pe_next(pe, root))
+
static inline bool eeh_pe_passed(struct eeh_pe *pe)
{
return pe ? !!atomic_read(&pe->pass_dev_cnt) : false;
@@ -262,19 +265,21 @@ static inline bool eeh_state_active(int state)
== (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
}
-typedef void *(*eeh_traverse_func)(void *data, void *flag);
+typedef void *(*eeh_edev_traverse_func)(struct eeh_dev *edev, void *flag);
+typedef void *(*eeh_pe_traverse_func)(struct eeh_pe *pe, void *flag);
void eeh_set_pe_aux_size(int size);
int eeh_phb_pe_create(struct pci_controller *phb);
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb);
+struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root);
struct eeh_pe *eeh_pe_get(struct pci_controller *phb,
int pe_no, int config_addr);
int eeh_add_to_parent_pe(struct eeh_dev *edev);
int eeh_rmv_from_parent_pe(struct eeh_dev *edev);
void eeh_pe_update_time_stamp(struct eeh_pe *pe);
void *eeh_pe_traverse(struct eeh_pe *root,
- eeh_traverse_func fn, void *flag);
+ eeh_pe_traverse_func fn, void *flag);
void *eeh_pe_dev_traverse(struct eeh_pe *root,
- eeh_traverse_func fn, void *flag);
+ eeh_edev_traverse_func fn, void *flag);
void eeh_pe_restore_bars(struct eeh_pe *pe);
const char *eeh_pe_loc_get(struct eeh_pe *pe);
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index a9b64df..fcfd056 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -211,6 +211,14 @@ label##3: \
FTR_ENTRY_OFFSET 951b-952b; \
.popsection;
+#define NOSPEC_BARRIER_FIXUP_SECTION \
+953: \
+ .pushsection __barrier_nospec_fixup,"a"; \
+ .align 2; \
+954: \
+ FTR_ENTRY_OFFSET 953b-954b; \
+ .popsection;
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -219,6 +227,7 @@ extern long stf_barrier_fallback;
extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
void apply_feature_fixups(void);
void setup_feature_keys(void);
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index b2dabd0..3dfb80b 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -48,9 +48,6 @@
#else /* !__ASSEMBLY__ */
extern void _mcount(void);
-#ifdef CONFIG_DYNAMIC_FTRACE
-# define FTRACE_ADDR ((unsigned long)ftrace_caller)
-# define FTRACE_REGS_ADDR FTRACE_ADDR
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/* relocation of mcount call site is the same as the address */
@@ -60,15 +57,15 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
struct dyn_arch_ftrace {
struct module *mod;
};
-#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
-#endif
+#endif /* CONFIG_FUNCTION_TRACER */
-#if defined(CONFIG_FTRACE_SYSCALLS) && !defined(__ASSEMBLY__)
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_FTRACE_SYSCALLS
/*
* Some syscall entry functions on powerpc start with "ppc_" (fork and clone,
* for instance) or ppc32_/ppc64_. We should also match the sys_ variant with
@@ -94,7 +91,25 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
(!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
(!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
}
-#endif
-#endif /* CONFIG_FTRACE_SYSCALLS && !__ASSEMBLY__ */
+#endif /* PPC64_ELF_ABI_v1 */
+#endif /* CONFIG_FTRACE_SYSCALLS */
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+
+static inline void this_cpu_disable_ftrace(void)
+{
+ get_paca()->ftrace_enabled = 0;
+}
+
+static inline void this_cpu_enable_ftrace(void)
+{
+ get_paca()->ftrace_enabled = 1;
+}
+#else /* CONFIG_PPC64 */
+static inline void this_cpu_disable_ftrace(void) { }
+static inline void this_cpu_enable_ftrace(void) { }
+#endif /* CONFIG_PPC64 */
+#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_FTRACE */
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 383f628..f1e9067 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -8,6 +8,7 @@
typedef struct {
unsigned int __softirq_pending;
unsigned int timer_irqs_event;
+ unsigned int broadcast_irqs_event;
unsigned int timer_irqs_others;
unsigned int pmu_irqs;
unsigned int mce_exceptions;
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 78540c0..3225eb6 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -166,22 +166,9 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
return pte_wrprotect(pte);
}
-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty)
-{
-#ifdef HUGETLB_NEED_PRELOAD
- /*
- * The "return 1" forces a call of update_mmu_cache, which will write a
- * TLB entry. Without this, platforms that don't do a write of the TLB
- * entry in the TLB miss handler asm will fault ad infinitum.
- */
- ptep_set_access_flags(vma, addr, ptep, pte, dirty);
- return 1;
-#else
- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-#endif
-}
+extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty);
static inline pte_t huge_ptep_get(pte_t *ptep)
{
@@ -202,7 +189,7 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
unsigned pdshift)
{
- return 0;
+ return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 855e17d..e151774 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -55,6 +55,7 @@ extern void replay_system_reset(void);
extern void __replay_interrupt(unsigned int vector);
extern void timer_interrupt(struct pt_regs *);
+extern void timer_broadcast_interrupt(void);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);
@@ -228,8 +229,8 @@ static inline bool arch_irqs_disabled(void)
#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
#else
-#define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
-#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
+#define __hard_irq_enable() __mtmsrd(MSR_EE|MSR_RI, 1)
+#define __hard_irq_disable() __mtmsrd(MSR_RI, 1)
#endif
#define hard_irq_disable() do { \
@@ -237,8 +238,12 @@ static inline bool arch_irqs_disabled(void)
__hard_irq_disable(); \
flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED); \
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
- if (!arch_irqs_disabled_flags(flags)) \
+ if (!arch_irqs_disabled_flags(flags)) { \
+ asm ("stdx %%r1, 0, %1 ;" \
+ : "=m" (local_paca->saved_r1) \
+ : "b" (&local_paca->saved_r1)); \
trace_hardirqs_off(); \
+ } \
} while(0)
static inline bool lazy_irq_pending(void)
diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h
index d76cb11..69f516e 100644
--- a/arch/powerpc/include/asm/imc-pmu.h
+++ b/arch/powerpc/include/asm/imc-pmu.h
@@ -128,4 +128,5 @@ extern int init_imc_pmu(struct device_node *parent,
struct imc_pmu *pmu_ptr, int pmu_id);
extern void thread_imc_disable(void);
extern int get_max_nest_dev(void);
+extern void unregister_thread_imc(void);
#endif /* __ASM_POWERPC_IMC_PMU_H */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index af07492..e0331e7 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -367,6 +367,11 @@ static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
*(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v;
}
+static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr)
+{
+ __raw_writeq((__force unsigned long)cpu_to_be64(v), addr);
+}
+
/*
* Real mode versions of the above. Those instructions are only supposed
* to be used in hypervisor real mode as per the architecture spec.
@@ -395,6 +400,11 @@ static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
: : "r" (val), "r" (paddr) : "memory");
}
+static inline void __raw_rm_writeq_be(u64 val, volatile void __iomem *paddr)
+{
+ __raw_rm_writeq((__force u64)cpu_to_be64(val), paddr);
+}
+
static inline u8 __raw_rm_readb(volatile void __iomem *paddr)
{
u8 ret;
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index ffe7c71..a47de82 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -83,7 +83,7 @@ struct machdep_calls {
int (*set_rtc_time)(struct rtc_time *);
void (*get_rtc_time)(struct rtc_time *);
- unsigned long (*get_boot_time)(void);
+ time64_t (*get_boot_time)(void);
unsigned char (*rtc_read_val)(int addr);
void (*rtc_write_val)(int addr, unsigned char val);
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index cda94a0..e200729 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -230,10 +230,6 @@ typedef struct {
unsigned int id;
unsigned int active;
unsigned long vdso_base;
-#ifdef CONFIG_PPC_64K_PAGES
- /* for 4K PTE fragment support */
- void *pte_frag;
-#endif
} mm_context_t;
/* Page size definitions, common between 32 and 64-bit
@@ -275,8 +271,6 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
*/
#if defined(CONFIG_PPC_4K_PAGES)
#define mmu_virtual_psize MMU_PAGE_4K
-#elif defined(CONFIG_PPC_64K_PAGES)
-#define mmu_virtual_psize MMU_PAGE_64K
#else
#error Unsupported page size
#endif
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 1835ca1..896efa5 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -250,11 +250,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)
-static inline int vma_pkey(struct vm_area_struct *vma)
-{
- return 0;
-}
-
static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
return 0x0UL;
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 4f65739..d8374f9 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -50,10 +50,6 @@ struct mod_arch_specific {
unsigned int stubs_section; /* Index of stubs section in module */
unsigned int toc_section; /* What section is the TOC? */
bool toc_fixed; /* Have we fixed up .TOC.? */
-#ifdef CONFIG_DYNAMIC_FTRACE
- unsigned long toc;
- unsigned long tramp;
-#endif
/* For module function descriptor dereference */
unsigned long start_opd;
@@ -62,10 +58,14 @@ struct mod_arch_specific {
/* Indices of PLT sections within module. */
unsigned int core_plt_section;
unsigned int init_plt_section;
+#endif /* powerpc64 */
+
#ifdef CONFIG_DYNAMIC_FTRACE
unsigned long tramp;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ unsigned long tramp_regs;
+#endif
#endif
-#endif /* powerpc64 */
/* List of BUG addresses, source line numbers and filenames */
struct list_head bug_list;
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index e94cede..ce1e0aa 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -350,14 +350,14 @@ extern struct mpc52xx_suspend mpc52xx_suspend;
extern int __init mpc52xx_pm_init(void);
extern int mpc52xx_set_wakeup_gpio(u8 pin, u8 level);
-#ifdef CONFIG_PPC_LITE5200
-extern int __init lite5200_pm_init(void);
-
/* lite5200 calls mpc5200 suspend functions, so here they are */
extern int mpc52xx_pm_prepare(void);
extern int mpc52xx_pm_enter(suspend_state_t);
extern void mpc52xx_pm_finish(void);
extern char saved_sram[0x4000]; /* reuse buffer from mpc52xx suspend */
+
+#ifdef CONFIG_PPC_LITE5200
+int __init lite5200_pm_init(void);
#endif
#endif /* CONFIG_PM */
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
index 9c80939..0f571e0 100644
--- a/arch/powerpc/include/asm/nmi.h
+++ b/arch/powerpc/include/asm/nmi.h
@@ -8,4 +8,10 @@ extern void arch_touch_nmi_watchdog(void);
static inline void arch_touch_nmi_watchdog(void) {}
#endif
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_STACKTRACE)
+extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+ bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+#endif
+
#endif /* _ASM_NMI_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 29d37bd..1707781 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -100,6 +100,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
static inline void pgtable_free(void *table, unsigned index_size)
{
if (!index_size) {
+ pgtable_page_dtor(virt_to_page(table));
free_page((unsigned long)table);
} else {
BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 03bbd11..7c46a98 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -133,7 +133,7 @@ extern int icache_44x_need_flush;
#ifndef __ASSEMBLY__
#define pte_clear(mm, addr, ptep) \
- do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
+ do { pte_update(ptep, ~0, 0); } while (0)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
@@ -146,21 +146,6 @@ static inline void pmd_clear(pmd_t *pmdp)
/*
- * When flushing the tlb entry for a page, we also need to flush the hash
- * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
- */
-extern int flush_hash_pages(unsigned context, unsigned long va,
- unsigned long pmdval, int count);
-
-/* Add an HPTE to the hash table */
-extern void add_hash_page(unsigned context, unsigned long va,
- unsigned long pmdval);
-
-/* Flush an entry from the TLB/hash table */
-extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
- unsigned long address);
-
-/*
* PTE updates. This function is called whenever an existing
* valid PTE is updated. This does -not- include set_pte_at()
* which nowadays only sets a new PTE.
@@ -246,12 +231,6 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
{
unsigned long old;
old = pte_update(ptep, _PAGE_ACCESSED, 0);
-#if _PAGE_HASHPTE != 0
- if (old & _PAGE_HASHPTE) {
- unsigned long ptephys = __pa(ptep) & PAGE_MASK;
- flush_hash_pages(context, addr, ptephys, 1);
- }
-#endif
return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
@@ -261,7 +240,7 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+ return __pte(pte_update(ptep, ~0, 0));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -277,19 +256,27 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
}
-static inline void __ptep_set_access_flags(struct mm_struct *mm,
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
pte_t *ptep, pte_t entry,
- unsigned long address)
+ unsigned long address,
+ int psize)
{
unsigned long set = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
unsigned long clr = ~pte_val(entry) & (_PAGE_RO | _PAGE_NA);
pte_update(ptep, clr, set);
+
+ flush_tlb_page(vma, address);
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
}
#define __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
+#define pte_same(A,B) ((pte_val(A) ^ pte_val(B)) == 0)
/*
* Note that on Book E processors, the pmd contains the kernel virtual
@@ -330,7 +317,7 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
/*
* Encode and decode a swap entry.
* Note that the bits we use in a PTE for representing a swap entry
- * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
+ * must not include the _PAGE_PRESENT bit.
* -- paulus
*/
#define __swp_type(entry) ((entry).val & 0x1f)
diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
index 124f9ac..bb4b3a4 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
@@ -52,12 +52,9 @@
#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
#define _PMD_BAD 0x802
-#define _PMD_SIZE 0x0e0 /* size field, != 0 for large-page PMD entry */
#define _PMD_SIZE_4M 0x0c0
#define _PMD_SIZE_16M 0x0e0
-#define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4))
-
/* Until my rework is finished, 40x still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES 1
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index 9721c78..0e693f3 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -52,8 +52,6 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}
-#ifndef CONFIG_PPC_64K_PAGES
-
#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -86,6 +84,18 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
#define pmd_pgtable(pmd) pmd_page(pmd)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+}
+
+
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
@@ -120,84 +130,47 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
__free_page(ptepage);
}
-extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
-#ifdef CONFIG_SMP
-extern void __tlb_remove_table(void *_table);
-#endif
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
- unsigned long address)
+static inline void pgtable_free(void *table, int shift)
{
- tlb_flush_pgtable(tlb, address);
- pgtable_free_tlb(tlb, page_address(table), 0);
+ if (!shift) {
+ pgtable_page_dtor(virt_to_page(table));
+ free_page((unsigned long)table);
+ } else {
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(shift), table);
+ }
}
-#else /* if CONFIG_PPC_64K_PAGES */
-
-extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
-extern void pte_fragment_free(unsigned long *, int);
-extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
-extern void __tlb_remove_table(void *_table);
-#endif
-
-#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
-
-static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
- pte_t *pte)
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
- pmd_set(pmd, (unsigned long)pte);
-}
+ unsigned long pgf = (unsigned long)table;
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
- pgtable_t pte_page)
-{
- pmd_set(pmd, (unsigned long)pte_page);
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
}
-static inline pgtable_t pmd_pgtable(pmd_t pmd)
+static inline void __tlb_remove_table(void *_table)
{
- return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
-}
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
-{
- return (pte_t *)pte_fragment_alloc(mm, address, 1);
+ pgtable_free(table, shift);
}
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
-{
- return (pgtable_t)pte_fragment_alloc(mm, address, 0);
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- pte_fragment_free((unsigned long *)pte, 1);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+#else
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
- pte_fragment_free((unsigned long *)ptepage, 0);
+ pgtable_free(table, shift);
}
+#endif
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
tlb_flush_pgtable(tlb, address);
- pgtable_free_tlb(tlb, table, 0);
-}
-#endif /* CONFIG_PPC_64K_PAGES */
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
- pgtable_gfp_flags(mm, GFP_KERNEL));
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
- kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+ pgtable_free_tlb(tlb, page_address(table), 0);
}
#define __pmd_free_tlb(tlb, pmd, addr) \
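pgtable_free_tlb() above relies on page tables being aligned well beyond MAX_PGTABLE_INDEX_SIZE, so the index size can be stashed in the low bits of the table pointer and recovered in __tlb_remove_table() on the deferred-free path. A self-contained sketch of that pointer-tagging idiom (the names and mask value below are illustrative assumptions):

#include <stdint.h>

#define EX_INDEX_MASK	0xfUL	/* stand-in for MAX_PGTABLE_INDEX_SIZE */

/* Pack an index size into the low bits of a sufficiently aligned pointer. */
static void *table_pack(void *table, unsigned long shift)
{
	return (void *)((uintptr_t)table | shift);	/* shift <= EX_INDEX_MASK */
}

/* Recover both the address and the index size later. */
static void *table_unpack(void *packed, unsigned long *shift)
{
	*shift = (uintptr_t)packed & EX_INDEX_MASK;
	return (void *)((uintptr_t)packed & ~EX_INDEX_MASK);
}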
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
deleted file mode 100644
index 7210c28..0000000
--- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
-#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
-
-#define __ARCH_USE_5LEVEL_HACK
-#include <asm-generic/pgtable-nopud.h>
-
-
-#define PTE_INDEX_SIZE 8
-#define PMD_INDEX_SIZE 10
-#define PUD_INDEX_SIZE 0
-#define PGD_INDEX_SIZE 12
-
-/*
- * we support 32 fragments per PTE page of 64K size
- */
-#define PTE_FRAG_NR 32
-/*
- * We use a 2K PTE page fragment and another 2K for storing
- * real_pte_t hash index
- */
-#define PTE_FRAG_SIZE_SHIFT 11
-#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
-
-#ifndef __ASSEMBLY__
-#define PTE_TABLE_SIZE PTE_FRAG_SIZE
-#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
-#define PUD_TABLE_SIZE (0)
-#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
-#endif /* __ASSEMBLY__ */
-
-#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
-#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
-
-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-/*
- * Bits to mask out from a PMD to get to the PTE page
- * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned.
- */
-#define PMD_MASKED_BITS (PTE_FRAG_SIZE - 1)
-/* Bits to mask out from a PGD/PUD to get to the PMD page */
-#define PUD_MASKED_BITS 0x1ff
-
-#define pgd_pte(pgd) (pud_pte(((pud_t){ pgd })))
-#define pte_pgd(pte) ((pgd_t)pte_pud(pte))
-
-#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 5c5f75d..dd0c723 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -6,13 +6,13 @@
* the ppc64 hashed page table.
*/
-#ifdef CONFIG_PPC_64K_PAGES
-#include <asm/nohash/64/pgtable-64k.h>
-#else
#include <asm/nohash/64/pgtable-4k.h>
-#endif
#include <asm/barrier.h>
+#ifdef CONFIG_PPC_64K_PAGES
+#error "Page size not supported"
+#endif
+
#define FIRST_USER_ADDRESS 0UL
/*
@@ -173,8 +173,6 @@ static inline void pgd_set(pgd_t *pgdp, unsigned long val)
/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, unsigned long pte, int huge);
/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
@@ -188,14 +186,12 @@ static inline unsigned long pte_update(struct mm_struct *mm,
__asm__ __volatile__(
"1: ldarx %0,0,%3 # pte_update\n\
- andi. %1,%0,%6\n\
- bne- 1b \n\
andc %1,%0,%4 \n\
- or %1,%1,%7\n\
+ or %1,%1,%6\n\
stdcx. %1,0,%3 \n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
- : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
+ : "r" (ptep), "r" (clr), "m" (*ptep), "r" (set)
: "cc" );
#else
unsigned long old = pte_val(*ptep);
@@ -205,20 +201,20 @@ static inline unsigned long pte_update(struct mm_struct *mm,
if (!huge)
assert_pte_locked(mm, addr);
-#ifdef CONFIG_PPC_BOOK3S_64
- if (old & _PAGE_HASHPTE)
- hpte_need_flush(mm, addr, ptep, old, huge);
-#endif
-
return old;
}
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
unsigned long old;
- if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+ if (pte_young(*ptep))
return 0;
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
@@ -285,9 +281,10 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
/* Set the dirty and/or accessed bits atomically in a linux PTE, this
* function doesn't need to flush the hash entry
*/
-static inline void __ptep_set_access_flags(struct mm_struct *mm,
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
pte_t *ptep, pte_t entry,
- unsigned long address)
+ unsigned long address,
+ int psize)
{
unsigned long bits = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
@@ -297,22 +294,22 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
__asm__ __volatile__(
"1: ldarx %0,0,%4\n\
- andi. %1,%0,%6\n\
- bne- 1b \n\
or %0,%3,%0\n\
stdcx. %0,0,%4\n\
bne- 1b"
:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
- :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
+ :"r" (bits), "r" (ptep), "m" (*ptep)
:"cc");
#else
unsigned long old = pte_val(*ptep);
*ptep = __pte(old | bits);
#endif
+
+ flush_tlb_page(vma, address);
}
#define __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
+#define pte_same(A,B) ((pte_val(A) ^ pte_val(B)) == 0)
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
@@ -324,11 +321,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
- /* \
- * Don't have overlapping bits with _PAGE_HPTEFLAGS \
- * We filter HPTEFLAGS on set_pte. \
- */ \
- BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
} while (0)
/*
* on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT;
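
With _PAGE_BUSY gone, the pte_update() loop above no longer spins waiting for a hash-fault handler; the ldarx/stdcx. pair simply clears and sets bits atomically. The same read-modify-write, as a C sketch using GCC's atomic builtins (illustrative only; the kernel keeps the asm form):

	/* Atomically clear 'clr' bits and set 'set' bits in a PTE word,
	 * returning the previous value. */
	static unsigned long pte_update_sketch(unsigned long *p,
					       unsigned long clr,
					       unsigned long set)
	{
		unsigned long old, new;

		do {
			old = __atomic_load_n(p, __ATOMIC_RELAXED);
			new = (old & ~clr) | set;
		} while (!__atomic_compare_exchange_n(p, &old, new, false,
						      __ATOMIC_RELAXED,
						      __ATOMIC_RELAXED));
		return old;
	}
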
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index c56de1e..2160be2 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -17,7 +17,6 @@ static inline int pte_write(pte_t pte)
}
static inline int pte_read(pte_t pte) { return 1; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
@@ -148,70 +147,33 @@ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, int percpu)
{
-#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
- /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
- * helper pte_update() which does an atomic update. We need to do that
- * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
- * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
- * the hash bits instead (ie, same as the non-SMP case)
- */
- if (percpu)
- *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
- | (pte_val(pte) & ~_PAGE_HASHPTE));
- else
- pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
-
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
/* Second case is 32-bit with 64-bit PTE. In this case, we
* can just store as long as we do the two halves in the right order
- * with a barrier in between. This is possible because we take care,
- * in the hash code, to pre-invalidate if the PTE was already hashed,
- * which synchronizes us with any concurrent invalidation.
- * In the percpu case, we also fallback to the simple update preserving
- * the hash bits
+ * with a barrier in between.
+ * In the percpu case, we also fallback to the simple update
*/
- if (percpu) {
- *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
- | (pte_val(pte) & ~_PAGE_HASHPTE));
+ if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
+ __asm__ __volatile__("\
+ stw%U0%X0 %2,%0\n\
+ eieio\n\
+ stw%U0%X0 %L2,%1"
+ : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+ : "r" (pte) : "memory");
return;
}
-#if _PAGE_HASHPTE != 0
- if (pte_val(*ptep) & _PAGE_HASHPTE)
- flush_hash_entry(mm, ptep, addr);
-#endif
- __asm__ __volatile__("\
- stw%U0%X0 %2,%0\n\
- eieio\n\
- stw%U0%X0 %L2,%1"
- : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
- : "r" (pte) : "memory");
-
-#elif defined(CONFIG_PPC_STD_MMU_32)
- /* Third case is 32-bit hash table in UP mode, we need to preserve
- * the _PAGE_HASHPTE bit since we may not have invalidated the previous
- * translation in the hash yet (done in a subsequent flush_tlb_xxx())
- * and see we need to keep track that this PTE needs invalidating
- */
- *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
- | (pte_val(pte) & ~_PAGE_HASHPTE));
-
-#else
/* Anything else just stores the PTE normally. That covers all 64-bit
* cases, and 32-bit non-hash with 32-bit PTEs.
*/
*ptep = pte;
-#ifdef CONFIG_PPC_BOOK3E_64
/*
* With hardware tablewalk, a sync is needed to ensure that
* subsequent accesses see the PTE we just wrote. Unlike userspace
* mappings, we can't tolerate spurious faults, so make sure
* the new PTE will be seen the first time.
*/
- if (is_kernel_addr(addr))
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
mb();
-#endif
-#endif
}
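
For the 32-bit/64-bit-PTE case kept above, the stw/eieio/stw sequence writes the PTE in two halves with a store barrier between them, so a concurrent walker never observes a torn entry. A C rendering of the same idea, assuming the big-endian layout where the word at offset 0 is the high half (a sketch, not the kernel asm):

	/* Publish a 64-bit PTE with two ordered 32-bit stores. */
	static void set_pte_two_halves(volatile unsigned int *p,
				       unsigned long long pte)
	{
		p[0] = (unsigned int)(pte >> 32);		/* first half */
		__asm__ __volatile__("eieio" ::: "memory");	/* order the stores */
		p[1] = (unsigned int)pte;			/* second half */
	}
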
diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h
index ccee8eb..12730b8 100644
--- a/arch/powerpc/include/asm/nohash/pte-book3e.h
+++ b/arch/powerpc/include/asm/nohash/pte-book3e.h
@@ -57,14 +57,8 @@
#define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
#define _PAGE_PRIVILEGED (_PAGE_BAP_SR)
-#define _PAGE_HASHPTE 0
-#define _PAGE_BUSY 0
-
#define _PAGE_SPECIAL _PAGE_SW0
-/* Flags to be preserved on PTE modifications */
-#define _PAGE_HPTEFLAGS _PAGE_BUSY
-
/* Base page size */
#ifdef CONFIG_PPC_64K_PAGES
#define _PAGE_PSIZE _PAGE_PSIZE_64K
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index d886a5b..3bab299 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -201,13 +201,21 @@
#define OPAL_SET_POWER_SHIFT_RATIO 155
#define OPAL_SENSOR_GROUP_CLEAR 156
#define OPAL_PCI_SET_P2P 157
+#define OPAL_QUIESCE 158
#define OPAL_NPU_SPA_SETUP 159
#define OPAL_NPU_SPA_CLEAR_CACHE 160
#define OPAL_NPU_TL_SET 161
+#define OPAL_SENSOR_READ_U64 162
#define OPAL_PCI_GET_PBCQ_TUNNEL_BAR 164
#define OPAL_PCI_SET_PBCQ_TUNNEL_BAR 165
#define OPAL_LAST 165
+#define QUIESCE_HOLD 1 /* Spin all calls at entry */
+#define QUIESCE_REJECT 2 /* Fail all calls with OPAL_BUSY */
+#define QUIESCE_LOCK_BREAK 3 /* Set to ignore locks. */
+#define QUIESCE_RESUME 4 /* Un-quiesce */
+#define QUIESCE_RESUME_FAST_REBOOT 5 /* Un-quiesce, fast reboot */
+
/* Device tree flags */
/*
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 03e1a92..e1b2910 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -201,6 +201,7 @@ int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
uint64_t length);
int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
+int64_t opal_sensor_read_u64(u32 sensor_hndl, int token, __be64 *sensor_data);
int64_t opal_handle_hmi(void);
int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
int64_t opal_unregister_dump_region(uint32_t id);
@@ -293,6 +294,7 @@ int opal_set_power_shift_ratio(u32 handle, int token, u32 psr);
int opal_sensor_group_clear(u32 group_hndl, int token);
s64 opal_signal_system_reset(s32 cpu);
+s64 opal_quiesce(u64 shutdown_type, s32 cpu);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
@@ -323,9 +325,10 @@ extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
extern int opal_async_wait_response_interruptible(uint64_t token,
struct opal_msg *msg);
extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
+extern int opal_get_sensor_data_u64(u32 sensor_hndl, u64 *sensor_data);
struct rtc_time;
-extern unsigned long opal_get_boot_time(void);
+extern time64_t opal_get_boot_time(void);
extern void opal_nvram_init(void);
extern void opal_flash_update_init(void);
extern void opal_flash_update_print_message(void);
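
The new OPAL_QUIESCE call (token 158) takes one of the QUIESCE_* values from opal-api.h plus a CPU argument, per the opal_quiesce() prototype added above. A hedged sketch of how a crash or fast-reboot path might use it (the -1 "all other CPUs" convention, the callee, and the error handling here are assumptions, not part of the patch):

	/* Illustrative only: hold other CPUs out of OPAL, do the
	 * critical work, then un-quiesce. */
	if (opal_quiesce(QUIESCE_HOLD, -1) == OPAL_SUCCESS) {
		do_fast_reboot_work();		/* hypothetical */
		opal_quiesce(QUIESCE_RESUME, -1);
	}
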
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 3f109a3..6d34bd7 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -161,7 +161,7 @@ struct paca_struct {
struct task_struct *__current; /* Pointer to current */
u64 kstack; /* Saved Kernel stack addr */
u64 stab_rr; /* stab/slb round-robin counter */
- u64 saved_r1; /* r1 save for RTAS calls or PM */
+ u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
u64 saved_msr; /* MSR saved here by enter_rtas */
u16 trap_save; /* Used when bad stack is encountered */
u8 irq_soft_mask; /* mask for irq soft masking */
@@ -222,6 +222,7 @@ struct paca_struct {
u8 hmi_event_available; /* HMI event is available */
u8 hmi_p9_special_emu; /* HMI P9 special emulation */
#endif
+ u8 ftrace_enabled; /* Hard disable ftrace */
/* Stuff for accurate time accounting */
struct cpu_accounting_data accounting;
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index dec9ce5..db7be07 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -39,6 +39,7 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
+extern bool hugetlb_disabled;
extern unsigned int HPAGE_SHIFT;
#else
#define HPAGE_SHIFT PAGE_SHIFT
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index ab7d2d9..14c79a7 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -8,6 +8,7 @@
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
+#include <asm/tlbflush.h>
struct mm_struct;
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index 0409c80..5ba80cf 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -15,19 +15,6 @@ DECLARE_STATIC_KEY_TRUE(pkey_disabled);
extern int pkeys_total; /* total pkeys as per device tree */
extern u32 initial_allocation_mask; /* bits set for reserved keys */
-/*
- * Define these here temporarily so we're not dependent on patching linux/mm.h.
- * Once it's updated we can drop these.
- */
-#ifndef VM_PKEY_BIT0
-# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
-# define VM_PKEY_BIT0 VM_HIGH_ARCH_0
-# define VM_PKEY_BIT1 VM_HIGH_ARCH_1
-# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
-# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
-# define VM_PKEY_BIT4 VM_HIGH_ARCH_4
-#endif
-
#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \
VM_PKEY_BIT3 | VM_PKEY_BIT4)
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 96c1a46..cff5a41 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -39,10 +39,10 @@ static inline long extended_cede_processor(unsigned long latency_hint)
set_cede_latency_hint(latency_hint);
rc = cede_processor();
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* Ensure that H_CEDE returns with IRQs on */
- if (WARN_ON(!(mfmsr() & MSR_EE)))
- __hard_irq_enable();
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ /* Ensure that H_CEDE returns with IRQs on */
+ if (WARN_ON(!(mfmsr() & MSR_EE)))
+ __hard_irq_enable();
#endif
set_cede_latency_hint(old_latency_hint);
diff --git a/arch/powerpc/include/asm/pmac_pfunc.h b/arch/powerpc/include/asm/pmac_pfunc.h
index 73bd8f2..cee4e9f 100644
--- a/arch/powerpc/include/asm/pmac_pfunc.h
+++ b/arch/powerpc/include/asm/pmac_pfunc.h
@@ -245,6 +245,7 @@ extern void pmf_put_function(struct pmf_function *func);
extern int pmf_call_one(struct pmf_function *func, struct pmf_args *args);
+int pmac_pfunc_base_install(void);
/* Suspend/resume code called by via-pmu directly for now */
extern void pmac_pfunc_base_suspend(void);
diff --git a/arch/powerpc/include/asm/pnv-ocxl.h b/arch/powerpc/include/asm/pnv-ocxl.h
index f6945d3..208b550 100644
--- a/arch/powerpc/include/asm/pnv-ocxl.h
+++ b/arch/powerpc/include/asm/pnv-ocxl.h
@@ -28,7 +28,7 @@ extern int pnv_ocxl_map_xsl_regs(struct pci_dev *dev, void __iomem **dsisr,
extern int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask,
void **platform_data);
extern void pnv_ocxl_spa_release(void *platform_data);
-extern int pnv_ocxl_spa_remove_pe(void *platform_data, int pe_handle);
+extern int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle);
extern int pnv_ocxl_alloc_xive_irq(u32 *irq, u64 *trigger_addr);
extern void pnv_ocxl_free_xive_irq(u32 irq);
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 18883b8..4436887 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -162,6 +162,7 @@
/* VMX Vector Store Instructions */
#define OP_31_XOP_STVX 231
+#define OP_31 31
#define OP_LWZ 32
#define OP_STFS 52
#define OP_STFSU 53
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 13f7f4c..75ece56 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -80,10 +80,8 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#else
#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
-#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
- SAVE_10GPRS(22, base)
-#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); \
- REST_10GPRS(22, base)
+#define SAVE_NVGPRS(base) stmw 13, GPR0+4*13(base)
+#define REST_NVGPRS(base) lmw 13, GPR0+4*13(base)
#endif
#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index c4b36a4..5debe33 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -249,7 +249,7 @@ struct thread_struct {
unsigned long ksp_vsid;
#endif
struct pt_regs *regs; /* Pointer to saved register state */
- mm_segment_t fs; /* for get_fs() validation */
+ mm_segment_t addr_limit; /* for get_fs() validation */
#ifdef CONFIG_BOOKE
/* BookE base exception scratch space; align on cacheline */
unsigned long normsave[8] ____cacheline_aligned;
@@ -264,10 +264,6 @@ struct thread_struct {
struct thread_fp_state *fp_save_area;
int fpexc_mode; /* floating-point exception mode */
unsigned int align_ctl; /* alignment handling control */
-#ifdef CONFIG_PPC64
- unsigned long start_tb; /* Start purr when proc switched in */
- unsigned long accum_tb; /* Total accumulated purr for process */
-#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct perf_event *ptrace_bps[HBP_NUM];
/*
@@ -383,7 +379,7 @@ struct thread_struct {
#define INIT_THREAD { \
.ksp = INIT_SP, \
.ksp_limit = INIT_SP_LIMIT, \
- .fs = KERNEL_DS, \
+ .addr_limit = KERNEL_DS, \
.pgdir = swapper_pg_dir, \
.fpexc_mode = MSR_FE0 | MSR_FE1, \
SPEFSCR_INIT \
@@ -392,7 +388,7 @@ struct thread_struct {
#define INIT_THREAD { \
.ksp = INIT_SP, \
.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
- .fs = KERNEL_DS, \
+ .addr_limit = KERNEL_DS, \
.fpexc_mode = 0, \
.ppr = INIT_PPR, \
.fscr = FSCR_TAR | FSCR_EBB \
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index c4a72c7..050b0d7 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -60,10 +60,6 @@
#ifndef _PMD_PRESENT_MASK
#define _PMD_PRESENT_MASK _PMD_PRESENT
#endif
-#ifndef _PMD_SIZE
-#define _PMD_SIZE 0
-#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
-#endif
#ifndef _PMD_USER
#define _PMD_USER 0
#endif
@@ -88,11 +84,7 @@
#define _PTE_NONE_MASK _PAGE_HPTEFLAGS
#endif
-/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
- * kernel without large page PMD support
- */
#ifndef __ASSEMBLY__
-extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
/*
* Don't just check for any non zero bits in __PAGE_USER, since for book3e
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index cb0f272..75c5b2c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -146,6 +146,12 @@
#define MSR_64BIT 0
#endif
+/* Condition Register related */
+#define CR0_SHIFT 28
+#define CR0_MASK 0xF
+#define CR0_TBEGIN_FAILURE (0x2 << 28) /* 0b0010 */
+
+
/* Power Management - Processor Stop Status and Control Register Fields */
#define PSSCR_RL_MASK 0x0000000F /* Requested Level */
#define PSSCR_MTL_MASK 0x000000F0 /* Maximum Transition Level */
@@ -239,13 +245,27 @@
#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
#define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */
#define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */
-#define TEXASR_ABORT __MASK(63-31) /* terminated by tabort or treclaim */
-#define TEXASR_SUSP __MASK(63-32) /* tx failed in suspended state */
-#define TEXASR_HV __MASK(63-34) /* MSR[HV] when failure occurred */
-#define TEXASR_PR __MASK(63-35) /* MSR[PR] when failure occurred */
-#define TEXASR_FS __MASK(63-36) /* TEXASR Failure Summary */
-#define TEXASR_EXACT __MASK(63-37) /* TFIAR value is exact */
+
+#define TEXASR_FC_LG (63 - 7) /* Failure Code */
+#define TEXASR_AB_LG (63 - 31) /* Abort */
+#define TEXASR_SU_LG (63 - 32) /* Suspend */
+#define TEXASR_HV_LG	(63 - 34)	/* Hypervisor state */
+#define TEXASR_PR_LG	(63 - 35)	/* Privilege level */
+#define TEXASR_FS_LG	(63 - 36)	/* Failure summary */
+#define TEXASR_EX_LG (63 - 37) /* TFIAR exact bit */
+#define TEXASR_ROT_LG (63 - 38) /* ROT bit */
+
+#define TEXASR_ABORT __MASK(TEXASR_AB_LG) /* terminated by tabort or treclaim */
+#define TEXASR_SUSP __MASK(TEXASR_SU_LG) /* tx failed in suspended state */
+#define TEXASR_HV __MASK(TEXASR_HV_LG) /* MSR[HV] when failure occurred */
+#define TEXASR_PR __MASK(TEXASR_PR_LG) /* MSR[PR] when failure occurred */
+#define TEXASR_FS __MASK(TEXASR_FS_LG) /* TEXASR Failure Summary */
+#define TEXASR_EXACT __MASK(TEXASR_EX_LG) /* TFIAR value is exact */
+#define TEXASR_ROT __MASK(TEXASR_ROT_LG)
+#define TEXASR_FC (ASM_CONST(0xFF) << TEXASR_FC_LG)
+
#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
+
#define SPRN_TIDR 144 /* Thread ID register */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
diff --git a/arch/powerpc/include/asm/rheap.h b/arch/powerpc/include/asm/rheap.h
index 1723817..8e83703 100644
--- a/arch/powerpc/include/asm/rheap.h
+++ b/arch/powerpc/include/asm/rheap.h
@@ -83,6 +83,9 @@ extern int rh_get_stats(rh_info_t * info, int what, int max_stats,
/* Simple dump of remote heap info */
extern void rh_dump(rh_info_t * info);
+/* Simple dump of remote info block */
+void rh_dump_blk(rh_info_t *info, rh_block_t *blk);
+
/* Set owner of taken block */
extern int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner);
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index ec9dd79..71e393c 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -361,7 +361,7 @@ extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
extern int rtas_ibm_suspend_me(u64 handle);
struct rtc_time;
-extern unsigned long rtas_get_boot_time(void);
+extern time64_t rtas_get_boot_time(void);
extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 27fa52e..8721fd0 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -52,6 +52,15 @@ enum l1d_flush_type {
void setup_rfi_flush(enum l1d_flush_type, bool enable);
void do_rfi_flush_fixups(enum l1d_flush_type types);
+void setup_barrier_nospec(void);
+void do_barrier_nospec_fixups(bool enable);
+extern bool barrier_nospec_enabled;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
+#else
+static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
+#endif
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index cfecfee..29ffaab 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -58,6 +58,7 @@ struct smp_ops_t {
extern void smp_flush_nmi_ipi(u64 delay_us);
extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
+extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern void smp_send_debugger_break(void);
extern void start_secondary_resume(void);
extern void smp_generic_give_timebase(void);
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
index ab9d849..4547891 100644
--- a/arch/powerpc/include/asm/sstep.h
+++ b/arch/powerpc/include/asm/sstep.h
@@ -97,6 +97,8 @@ enum instruction_type {
#define SIZE(n) ((n) << 12)
#define GETSIZE(w) ((w) >> 12)
+#define GETTYPE(t) ((t) & INSTR_TYPE_MASK)
+
#define MKOP(t, f, s) ((t) | (f) | SIZE(s))
struct instruction_op {
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index be8c9fa..5b03d8a 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -94,6 +94,5 @@ static inline void clear_task_ebb(struct task_struct *t)
extern int set_thread_uses_vas(void);
extern int set_thread_tidr(struct task_struct *t);
-extern void clear_thread_tidr(struct task_struct *t);
#endif /* _ASM_POWERPC_SWITCH_TO_H */
diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h
index 1b90a35..398171f 100644
--- a/arch/powerpc/include/asm/syscalls.h
+++ b/arch/powerpc/include/asm/syscalls.h
@@ -16,7 +16,7 @@ asmlinkage long sys_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
asmlinkage long ppc64_personality(unsigned long personality);
-asmlinkage int ppc_rtas(struct rtas_args __user *uargs);
+asmlinkage long sys_rtas(struct rtas_args __user *uargs);
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_SYSCALLS_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index d61f9c9..79a3b47 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -147,7 +147,7 @@ SYSCALL_SPU(setfsuid)
SYSCALL_SPU(setfsgid)
SYSCALL_SPU(llseek)
COMPAT_SYS_SPU(getdents)
-SYSX_SPU(sys_select,ppc32_select,sys_select)
+COMPAT_SPU_NEW(select)
SYSCALL_SPU(flock)
SYSCALL_SPU(msync)
COMPAT_SYS_SPU(readv)
@@ -245,7 +245,7 @@ SYSCALL_SPU(epoll_create)
SYSCALL_SPU(epoll_ctl)
SYSCALL_SPU(epoll_wait)
SYSCALL_SPU(remap_file_pages)
-SYSX_SPU(sys_timer_create,compat_sys_timer_create,sys_timer_create)
+COMPAT_SYS_SPU(timer_create)
COMPAT_SYS_SPU(timer_settime)
COMPAT_SYS_SPU(timer_gettime)
SYSCALL_SPU(timer_getoverrun)
@@ -260,7 +260,7 @@ COMPAT_SYS_SPU(utimes)
COMPAT_SYS_SPU(statfs64)
COMPAT_SYS_SPU(fstatfs64)
SYSX(sys_ni_syscall,ppc_fadvise64_64,ppc_fadvise64_64)
-PPC_SYS_SPU(rtas)
+SYSCALL_SPU(rtas)
OLDSYS(debug_setcontext)
SYSCALL(ni_syscall)
COMPAT_SYS(migrate_pages)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 5964145db..f308dfe 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -79,8 +79,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
- TIF_NEED_RESCHED */
+#define TIF_FSCHECK 3 /* Check FS is USER_DS on return */
#define TIF_32BIT 4 /* 32 bit binary */
#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
#define TIF_PATCH_PENDING 6 /* pending live patching update */
@@ -99,6 +98,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
#if defined(CONFIG_PPC64)
#define TIF_ELF2ABI 18 /* function descriptors must die! */
#endif
+#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -118,13 +118,15 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
+#define _TIF_FSCHECK (1<<TIF_FSCHECK)
#define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
_TIF_NOHZ)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_RESTORE_TM | _TIF_PATCH_PENDING)
+ _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
+ _TIF_FSCHECK)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
/* Bits in local_flags */
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index db546c0..b80d492 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -26,9 +26,6 @@ extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
extern struct clock_event_device decrementer_clockevent;
-struct rtc_time;
-extern void to_tm(int tim, struct rtc_time * tm);
-extern void tick_broadcast_ipi_handler(void);
extern void generic_calibrate_decr(void);
extern void hdec_interrupt(struct pt_regs *regs);
@@ -196,14 +193,6 @@ extern u64 mulhdu(u64, u64);
extern void div128_by_32(u64 dividend_high, u64 dividend_low,
unsigned divisor, struct div_result *dr);
-/* Used to store Processor Utilization register (purr) values */
-
-struct cpu_usage {
- u64 current_tb; /* Holds the current purr register values */
-};
-
-DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
-
extern void secondary_cpu_time_init(void);
extern void __init time_init(void);
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index a7eabff..9138bac 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -76,6 +76,19 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
return false;
return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
}
+static inline void mm_reset_thread_local(struct mm_struct *mm)
+{
+ WARN_ON(atomic_read(&mm->context.copros) > 0);
+ /*
+ * It's possible for mm_access to take a reference on mm_users to
+ * access the remote mm from another thread, but it's not allowed
+ * to set mm_cpumask, so mm_users may be > 1 here.
+ */
+ WARN_ON(current->mm != mm);
+ atomic_set(&mm->context.active_cpus, 1);
+ cpumask_clear(mm_cpumask(mm));
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+}
#else /* CONFIG_PPC_BOOK3S_64 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
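
mm_reset_thread_local() lets a now single-threaded mm shrink its cpumask back to the current CPU so later TLB flushes stay local. A hedged sketch of the intended call-site shape (the flush helper and the exact mm_users test are assumptions; the real user lands in the Radix TLB flush code):

	/* Sketch: after flushing everything for a single-threaded mm,
	 * make subsequent invalidations CPU-local again. */
	if (current->mm == mm && atomic_read(&mm->mm_users) == 1) {
		flush_all_mm(mm);		/* assumed full-mm flush */
		mm_reset_thread_local(mm);
	}
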
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index b1658c9..e94f6db 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -10,12 +10,10 @@
#ifndef __ASSEMBLY__
-extern void tm_enable(void);
extern void tm_reclaim(struct thread_struct *thread,
uint8_t cause);
extern void tm_reclaim_current(uint8_t cause);
extern void tm_recheckpoint(struct thread_struct *thread);
-extern void tm_abort(uint8_t cause);
extern void tm_save_sprs(struct thread_struct *thread);
extern void tm_restore_sprs(struct thread_struct *thread);
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index 33f3b47..d018e86 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -81,8 +81,7 @@ TRACE_EVENT_FN_COND(hcall_entry,
TRACE_EVENT_FN_COND(hcall_exit,
- TP_PROTO(unsigned long opcode, unsigned long retval,
- unsigned long *retbuf),
+ TP_PROTO(unsigned long opcode, long retval, unsigned long *retbuf),
TP_ARGS(opcode, retval, retbuf),
@@ -90,7 +89,7 @@ TRACE_EVENT_FN_COND(hcall_exit,
TP_STRUCT__entry(
__field(unsigned long, opcode)
- __field(unsigned long, retval)
+ __field(long, retval)
),
TP_fast_assign(
@@ -98,7 +97,7 @@ TRACE_EVENT_FN_COND(hcall_exit,
__entry->retval = retval;
),
- TP_printk("opcode=%lu retval=%lu", __entry->opcode, __entry->retval),
+ TP_printk("opcode=%lu retval=%ld", __entry->opcode, __entry->retval),
hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index a62ee66..468653c 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -30,8 +30,14 @@
#endif
#define get_ds() (KERNEL_DS)
-#define get_fs() (current->thread.fs)
-#define set_fs(val) (current->thread.fs = (val))
+#define get_fs() (current->thread.addr_limit)
+
+static inline void set_fs(mm_segment_t fs)
+{
+ current->thread.addr_limit = fs;
+ /* On user-mode return check addr_limit (fs) is correct */
+ set_thread_flag(TIF_FSCHECK);
+}
#define segment_eq(a, b) ((a).seg == (b).seg)
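
set_fs() now raises TIF_FSCHECK so the kernel verifies addr_limit on the way back to user space instead of trusting every caller to restore it. The classic bracket it guards looks like this (a sketch; the callee name is hypothetical):

	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);			/* widen the limit... */
	ret = kernel_read_sketch(f, buf, len);	/* hypothetical callee */
	set_fs(old_fs);				/* ...and restore it; a missed
						 * restore is what TIF_FSCHECK
						 * now catches on return */
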
@@ -252,6 +258,7 @@ do { \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
might_fault(); \
+ barrier_nospec(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -263,8 +270,10 @@ do { \
unsigned long __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
might_fault(); \
- if (access_ok(VERIFY_READ, __gu_addr, (size))) \
+ if (access_ok(VERIFY_READ, __gu_addr, (size))) { \
+ barrier_nospec(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ } \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
@@ -275,6 +284,7 @@ do { \
unsigned long __gu_val; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
+ barrier_nospec(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -302,15 +312,19 @@ static inline unsigned long raw_copy_from_user(void *to,
switch (n) {
case 1:
+ barrier_nospec();
__get_user_size(*(u8 *)to, from, 1, ret);
break;
case 2:
+ barrier_nospec();
__get_user_size(*(u16 *)to, from, 2, ret);
break;
case 4:
+ barrier_nospec();
__get_user_size(*(u32 *)to, from, 4, ret);
break;
case 8:
+ barrier_nospec();
__get_user_size(*(u64 *)to, from, 8, ret);
break;
}
@@ -318,6 +332,7 @@ static inline unsigned long raw_copy_from_user(void *to,
return 0;
}
+ barrier_nospec();
return __copy_tofrom_user((__force void __user *)to, from, n);
}
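
Each barrier_nospec() above sits between the pointer validation and the first user access, so a mispredicted access_ok() cannot speculatively forward user-controlled data. The pattern, reduced to its shape (a sketch, not the kernel macros):

	/* Validate, fence speculation, then dereference. */
	if (access_ok(VERIFY_READ, uptr, size)) {
		barrier_nospec();	/* no loads move above the check */
		__get_user_size(val, uptr, size, err);
	}
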
diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h
index fa42888..6de989f 100644
--- a/arch/powerpc/include/asm/xive-regs.h
+++ b/arch/powerpc/include/asm/xive-regs.h
@@ -123,10 +123,4 @@
#define TM_QW3_NSR_I PPC_BIT8(2)
#define TM_QW3_NSR_GRP_LVL PPC_BIT8(3,7)
-/* Utilities to manipulate these (originaly from OPAL) */
-#define MASK_TO_LSH(m) (__builtin_ffsl(m) - 1)
-#define GETFIELD(m, v) (((v) & (m)) >> MASK_TO_LSH(m))
-#define SETFIELD(m, v, val) \
- (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m)))
-
#endif /* _ASM_POWERPC_XIVE_REGS_H */
diff --git a/arch/powerpc/include/asm/xmon.h b/arch/powerpc/include/asm/xmon.h
index eb42a0c..30ff69b 100644
--- a/arch/powerpc/include/asm/xmon.h
+++ b/arch/powerpc/include/asm/xmon.h
@@ -29,7 +29,7 @@ static inline void xmon_register_spus(struct list_head *list) { };
extern int cpus_are_in_xmon(void);
#endif
-extern void xmon_printf(const char *format, ...);
+extern __printf(1, 2) void xmon_printf(const char *format, ...);
#endif /* __KERNEL __ */
#endif /* __ASM_POWERPC_XMON_H */
diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h
index a36c206..7d6dc50 100644
--- a/arch/powerpc/include/asm/xor.h
+++ b/arch/powerpc/include/asm/xor.h
@@ -24,17 +24,7 @@
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>
-
-void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in);
-void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in);
-void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in);
-void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in);
+#include <asm/xor_altivec.h>
static struct xor_block_template xor_block_altivec = {
.name = "altivec",
diff --git a/arch/powerpc/include/asm/xor_altivec.h b/arch/powerpc/include/asm/xor_altivec.h
new file mode 100644
index 0000000..6ca9235
--- /dev/null
+++ b/arch/powerpc/include/asm/xor_altivec.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_XOR_ALTIVEC_H
+#define _ASM_POWERPC_XOR_ALTIVEC_H
+
+#ifdef CONFIG_ALTIVEC
+
+void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in);
+void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in);
+void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in);
+void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in, unsigned long *v5_in);
+
+#endif
+#endif /* _ASM_POWERPC_XOR_ALTIVEC_H */
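
With the prototypes now living in xor_altivec.h, the RAID xor template above and other users can share them. A hedged usage sketch, guarded the same way xor.h selects the AltiVec template (buffer setup elided; 'dst' and 'src' are assumed page-aligned unsigned long arrays):

	/* Illustrative: XOR one page of 'src' into 'dst' with the
	 * AltiVec routine when the CPU supports it. */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		xor_altivec_2(PAGE_SIZE, dst, src);
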