-rw-r--r--  Documentation/arm64/booting.txt | 10
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 5
-rw-r--r--  arch/arm/kvm/psci.c | 2
-rw-r--r--  arch/arm/mm/init.c | 1
-rw-r--r--  arch/arm64/Kconfig | 1
-rw-r--r--  arch/arm64/configs/defconfig | 1
-rw-r--r--  arch/arm64/include/asm/acpi.h | 19
-rw-r--r--  arch/arm64/include/asm/alternative-asm.h | 29
-rw-r--r--  arch/arm64/include/asm/alternative.h | 46
-rw-r--r--  arch/arm64/include/asm/boot.h | 14
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 5
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h | 27
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 8
-rw-r--r--  arch/arm64/include/asm/cpuidle.h | 8
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 15
-rw-r--r--  arch/arm64/include/asm/insn.h | 3
-rw-r--r--  arch/arm64/include/asm/io.h | 8
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 5
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 23
-rw-r--r--  arch/arm64/include/asm/mmu.h | 1
-rw-r--r--  arch/arm64/include/asm/perf_event.h | 7
-rw-r--r--  arch/arm64/include/asm/proc-fns.h | 4
-rw-r--r--  arch/arm64/include/asm/processor.h | 19
-rw-r--r--  arch/arm64/include/asm/psci.h | 12
-rw-r--r--  arch/arm64/include/asm/smp.h | 2
-rw-r--r--  arch/arm64/include/asm/smp_plat.h | 16
-rw-r--r--  arch/arm64/include/asm/suspend.h | 2
-rw-r--r--  arch/arm64/include/asm/system_misc.h | 14
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 2
-rw-r--r--  arch/arm64/kernel/acpi.c | 123
-rw-r--r--  arch/arm64/kernel/alternative.c | 71
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 1
-rw-r--r--  arch/arm64/kernel/cpu_ops.c | 72
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 16
-rw-r--r--  arch/arm64/kernel/cpuidle.c | 11
-rw-r--r--  arch/arm64/kernel/entry.S | 37
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 31
-rw-r--r--  arch/arm64/kernel/head.S | 52
-rw-r--r--  arch/arm64/kernel/insn.c | 60
-rw-r--r--  arch/arm64/kernel/perf_event.c | 2
-rw-r--r--  arch/arm64/kernel/process.c | 62
-rw-r--r--  arch/arm64/kernel/psci.c | 244
-rw-r--r--  arch/arm64/kernel/setup.c | 37
-rw-r--r--  arch/arm64/kernel/signal32.c | 4
-rw-r--r--  arch/arm64/kernel/sleep.S | 9
-rw-r--r--  arch/arm64/kernel/smp.c | 246
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c | 8
-rw-r--r--  arch/arm64/kernel/suspend.c | 9
-rw-r--r--  arch/arm64/kernel/traps.c | 5
-rw-r--r--  arch/arm64/kernel/vdso/Makefile | 4
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 11
-rw-r--r--  arch/arm64/kvm/hyp.S | 18
-rw-r--r--  arch/arm64/mm/Makefile | 2
-rw-r--r--  arch/arm64/mm/cache.S | 75
-rw-r--r--  arch/arm64/mm/context.c | 8
-rw-r--r--  arch/arm64/mm/fault.c | 12
-rw-r--r--  arch/arm64/mm/flush.c | 1
-rw-r--r--  arch/arm64/mm/init.c | 2
-rw-r--r--  arch/arm64/mm/mmu.c | 66
-rw-r--r--  arch/arm64/mm/proc.S | 46
-rw-r--r--  arch/powerpc/kernel/prom.c | 1
-rw-r--r--  drivers/of/fdt.c | 19
-rw-r--r--  include/linux/of_fdt.h | 2
-rw-r--r--  virt/kvm/arm/vgic.c | 3
64 files changed, 938 insertions, 741 deletions
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index f3c05b5..1690350 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -45,11 +45,13 @@ sees fit.)
Requirement: MANDATORY
-The device tree blob (dtb) must be placed on an 8-byte boundary within
-the first 512 megabytes from the start of the kernel image and must not
-cross a 2-megabyte boundary. This is to allow the kernel to map the
-blob using a single section mapping in the initial page tables.
+The device tree blob (dtb) must be placed on an 8-byte boundary and must
+not exceed 2 megabytes in size. Since the dtb will be mapped cacheable
+using blocks of up to 2 megabytes in size, it must not be placed within
+any 2M region which must be mapped with any specific attributes.
+NOTE: versions prior to v4.2 also require that the DTB be placed within
+the 512 MB region starting at text_offset bytes below the kernel Image.
3. Decompress the kernel image
------------------------------
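The reworded requirement above boils down to two checks a boot loader can make before handing over: the blob must sit on an 8-byte boundary, and its total size must not exceed 2 MB. A minimal, hypothetical loader-side sketch of those checks; the helper name fdt_placement_ok and the way totalsize is obtained are assumptions, not part of this patch:

#include <stdbool.h>
#include <stdint.h>

#define MIN_FDT_ALIGN	8
#define MAX_FDT_SIZE	(2 * 1024 * 1024)	/* 2 MB */

/*
 * Hypothetical loader-side check: the blob must start on an 8-byte
 * boundary and its size (taken from the FDT header's totalsize field,
 * already converted to host byte order) must not exceed 2 MB.
 */
static bool fdt_placement_ok(uint64_t fdt_phys, uint32_t totalsize)
{
	if (fdt_phys % MIN_FDT_ALIGN)
		return false;		/* not 8-byte aligned */
	if (totalsize > MAX_FDT_SIZE)
		return false;		/* larger than 2 MB */
	return true;
}

int main(void)
{
	/* an 8-byte aligned 64 KB blob passes both checks */
	return fdt_placement_ok(0x82000000, 0x10000) ? 0 : 1;
}

As the NOTE in the new text says, the old 512 MB placement rule only remains relevant for kernels older than v4.2.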
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d71607c..e896d2c 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -218,11 +218,6 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
return 0;
}
-static inline void vgic_arch_setup(const struct vgic_params *vgic)
-{
- BUG_ON(vgic->type != VGIC_V2);
-}
-
int kvm_perf_init(void);
int kvm_perf_teardown(void);
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 531e922..4b94b51 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -24,6 +24,8 @@
#include <asm/kvm_psci.h>
#include <asm/kvm_host.h>
+#include <uapi/linux/psci.h>
+
/*
* This is an implementation of the Power State Coordination Interface
* as described in ARM document number ARM DEN 0022A.
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index be92fa0..8a63b4c 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -268,6 +268,7 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
if (mdesc->reserve)
mdesc->reserve();
+ early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
/* reserve memory for DMA contiguous allocations */
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6be1a6e..802400f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -72,6 +72,7 @@ config ARM64
select HAVE_RCU_TABLE_FREE
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
+ select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA
select NO_BOOTMEM
select OF
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 2ed7449..daefbf0 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -180,6 +180,7 @@ CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_ARM64_CRYPTO=y
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 59c05d8..39248d3 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -16,6 +16,7 @@
#include <linux/irqchip/arm-gic-acpi.h>
#include <asm/cputype.h>
+#include <asm/psci.h>
#include <asm/smp_plat.h>
/* Basic configuration for ACPI */
@@ -39,18 +40,6 @@ extern int acpi_disabled;
extern int acpi_noirq;
extern int acpi_pci_disabled;
-/* 1 to indicate PSCI 0.2+ is implemented */
-static inline bool acpi_psci_present(void)
-{
- return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
-}
-
-/* 1 to indicate HVC must be used instead of SMC as the PSCI conduit */
-static inline bool acpi_psci_use_hvc(void)
-{
- return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
-}
-
static inline void disable_acpi(void)
{
acpi_disabled = 1;
@@ -88,9 +77,11 @@ static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
#else
-static inline bool acpi_psci_present(void) { return false; }
-static inline bool acpi_psci_use_hvc(void) { return false; }
static inline void acpi_init_cpus(void) { }
#endif /* CONFIG_ACPI */
+static inline const char *acpi_get_enable_method(int cpu)
+{
+ return acpi_psci_present() ? "psci" : NULL;
+}
#endif /*_ASM_ACPI_H*/
diff --git a/arch/arm64/include/asm/alternative-asm.h b/arch/arm64/include/asm/alternative-asm.h
deleted file mode 100644
index 919a678..0000000
--- a/arch/arm64/include/asm/alternative-asm.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_ALTERNATIVE_ASM_H
-#define __ASM_ALTERNATIVE_ASM_H
-
-#ifdef __ASSEMBLY__
-
-.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
- .word \orig_offset - .
- .word \alt_offset - .
- .hword \feature
- .byte \orig_len
- .byte \alt_len
-.endm
-
-.macro alternative_insn insn1 insn2 cap
-661: \insn1
-662: .pushsection .altinstructions, "a"
- altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
- .popsection
- .pushsection .altinstr_replacement, "ax"
-663: \insn2
-664: .popsection
- .if ((664b-663b) != (662b-661b))
- .error "Alternatives instruction length mismatch"
- .endif
-.endm
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ASM_ALTERNATIVE_ASM_H */
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index d261f01..c385a0c 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,6 +1,8 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
@@ -24,7 +26,20 @@ void free_alternatives_memory(void);
" .byte 662b-661b\n" /* source len */ \
" .byte 664f-663f\n" /* replacement len */
-/* alternative assembly primitive: */
+/*
+ * alternative assembly primitive:
+ *
+ * If any of these .org directive fail, it means that insn1 and insn2
+ * don't have the same length. This used to be written as
+ *
+ * .if ((664b-663b) != (662b-661b))
+ * .error "Alternatives instruction length mismatch"
+ * .endif
+ *
+ * but most assemblers die if insn1 or insn2 have a .inst. This should
+ * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
+ * containing commit 4e4d08cf7399b606 or c1baaddf8861).
+ */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
"661:\n\t" \
oldinstr "\n" \
@@ -37,8 +52,31 @@ void free_alternatives_memory(void);
newinstr "\n" \
"664:\n\t" \
".popsection\n\t" \
- ".if ((664b-663b) != (662b-661b))\n\t" \
- " .error \"Alternatives instruction length mismatch\"\n\t"\
- ".endif\n"
+ ".org . - (664b-663b) + (662b-661b)\n\t" \
+ ".org . - (662b-661b) + (664b-663b)\n"
+
+#else
+
+.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
+ .word \orig_offset - .
+ .word \alt_offset - .
+ .hword \feature
+ .byte \orig_len
+ .byte \alt_len
+.endm
+
+.macro alternative_insn insn1 insn2 cap
+661: \insn1
+662: .pushsection .altinstructions, "a"
+ altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
+ .popsection
+ .pushsection .altinstr_replacement, "ax"
+663: \insn2
+664: .popsection
+ .org . - (664b-663b) + (662b-661b)
+ .org . - (662b-661b) + (664b-663b)
+.endm
+
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_ALTERNATIVE_H */
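The comment above explains why the .if/.error length check was replaced by two .org directives: the assembler refuses to move the location counter backwards, so the first .org shifts the counter by (orig_len - alt_len), the second by (alt_len - orig_len), and both only succeed when the two sequences are the same length. A small standalone C model of that rule, using made-up example lengths:

#include <stdio.h>

/*
 * Model of the two ".org" directives used by ALTERNATIVE() to verify
 * that the original and replacement sequences have the same length.
 * The location counter may only move forward; trying to move it
 * backwards is an assembler error, which is what catches a mismatch.
 */
static int org_check(long orig_len, long alt_len)
{
	long pc = 0;	/* location counter right after the original sequence */
	long next;

	/* .org . - (664b-663b) + (662b-661b):  pc += orig_len - alt_len */
	next = pc - alt_len + orig_len;
	if (next < pc)
		return -1;	/* counter moved backwards: assembler error */
	pc = next;

	/* .org . - (662b-661b) + (664b-663b):  pc += alt_len - orig_len */
	next = pc - orig_len + alt_len;
	if (next < pc)
		return -1;	/* counter moved backwards: assembler error */

	return 0;		/* only reachable when orig_len == alt_len */
}

int main(void)
{
	printf("equal lengths:       %d\n", org_check(8, 8));	/* 0  */
	printf("replacement longer:  %d\n", org_check(4, 8));	/* -1 */
	printf("replacement shorter: %d\n", org_check(8, 4));	/* -1 */
	return 0;
}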
diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h
new file mode 100644
index 0000000..81151b6
--- /dev/null
+++ b/arch/arm64/include/asm/boot.h
@@ -0,0 +1,14 @@
+
+#ifndef __ASM_BOOT_H
+#define __ASM_BOOT_H
+
+#include <asm/sizes.h>
+
+/*
+ * arm64 requires the DTB to be 8 byte aligned and
+ * not exceed 2MB in size.
+ */
+#define MIN_FDT_ALIGN 8
+#define MAX_FDT_SIZE SZ_2M
+
+#endif
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 67d309c..c75b8d0 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -40,10 +40,6 @@
* the implementation assumes non-aliasing VIPT D-cache and (aliasing)
* VIPT or ASID-tagged VIVT I-cache.
*
- * flush_cache_all()
- *
- * Unconditionally clean and invalidate the entire cache.
- *
* flush_cache_mm(mm)
*
* Clean and invalidate all user space cache entries
@@ -69,7 +65,6 @@
* - kaddr - page address
* - size - region size
*/
-extern void flush_cache_all(void);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index 5a31d67..8f03446 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -19,15 +19,15 @@
#include <linux/init.h>
#include <linux/threads.h>
-struct device_node;
-
/**
* struct cpu_operations - Callback operations for hotplugging CPUs.
*
* @name: Name of the property as appears in a devicetree cpu node's
- * enable-method property.
- * @cpu_init: Reads any data necessary for a specific enable-method from the
- * devicetree, for a given cpu node and proposed logical id.
+ * enable-method property. On systems booting with ACPI, @name
+ * identifies the struct cpu_operations entry corresponding to
+ * the boot protocol specified in the ACPI MADT table.
+ * @cpu_init: Reads any data necessary for a specific enable-method for a
+ * proposed logical id.
* @cpu_prepare: Early one-time preparation step for a cpu. If there is a
* mechanism for doing so, tests whether it is possible to boot
* the given CPU.
@@ -40,15 +40,15 @@ struct device_node;
* @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
* cpu being killed.
* @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu.
- * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
- * devicetree, for a given cpu node and proposed logical id.
+ * @cpu_init_idle: Reads any data necessary to initialize CPU idle states for
+ * a proposed logical id.
* @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
* to wrong parameters or error conditions. Called from the
* CPU being suspended. Must be called with IRQs disabled.
*/
struct cpu_operations {
const char *name;
- int (*cpu_init)(struct device_node *, unsigned int);
+ int (*cpu_init)(unsigned int);
int (*cpu_prepare)(unsigned int);
int (*cpu_boot)(unsigned int);
void (*cpu_postboot)(void);
@@ -58,14 +58,17 @@ struct cpu_operations {
int (*cpu_kill)(unsigned int cpu);
#endif
#ifdef CONFIG_CPU_IDLE
- int (*cpu_init_idle)(struct device_node *, unsigned int);
+ int (*cpu_init_idle)(unsigned int);
int (*cpu_suspend)(unsigned long);
#endif
};
extern const struct cpu_operations *cpu_ops[NR_CPUS];
-int __init cpu_read_ops(struct device_node *dn, int cpu);
-void __init cpu_read_bootcpu_ops(void);
-const struct cpu_operations *cpu_get_ops(const char *name);
+int __init cpu_read_ops(int cpu);
+
+static inline void __init cpu_read_bootcpu_ops(void)
+{
+ cpu_read_ops(0);
+}
#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 82cb9f9..c104421 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -24,8 +24,9 @@
#define ARM64_WORKAROUND_CLEAN_CACHE 0
#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
#define ARM64_WORKAROUND_845719 2
+#define ARM64_HAS_SYSREG_GIC_CPUIF 3
-#define ARM64_NCAPS 3
+#define ARM64_NCAPS 4
#ifndef __ASSEMBLY__
@@ -38,6 +39,11 @@ struct arm64_cpu_capabilities {
u32 midr_model;
u32 midr_range_min, midr_range_max;
};
+
+ struct { /* Feature register checking */
+ u64 register_mask;
+ u64 register_value;
+ };
};
};
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index 141b2fc..0f74f05 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -5,20 +5,16 @@
#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_init(unsigned int cpu);
-extern int cpu_suspend(unsigned long arg);
+extern int arm_cpuidle_suspend(int index);
#else
static inline int arm_cpuidle_init(unsigned int cpu)
{
return -EOPNOTSUPP;
}
-static inline int cpu_suspend(unsigned long arg)
+static inline int arm_cpuidle_suspend(int index)
{
return -EOPNOTSUPP;
}
#endif
-static inline int arm_cpuidle_suspend(int index)
-{
- return cpu_suspend(index);
-}
#endif
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 95e6b6d..c0739187 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -17,6 +17,7 @@
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
+#include <asm/boot.h>
#include <asm/page.h>
/*
@@ -32,6 +33,20 @@
*/
enum fixed_addresses {
FIX_HOLE,
+
+ /*
+ * Reserve a virtual window for the FDT that is 2 MB larger than the
+ * maximum supported size, and put it at the top of the fixmap region.
+ * The additional space ensures that any FDT that does not exceed
+ * MAX_FDT_SIZE can be mapped regardless of whether it crosses any
+ * 2 MB alignment boundaries.
+ *
+ * Keep this at the top so it remains 2 MB aligned.
+ */
+#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M)
+ FIX_FDT_END,
+ FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+
FIX_EARLYCON_MEM_BASE,
FIX_TEXT_POKE0,
__end_of_permanent_fixed_addresses,
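The window is sized MAX_FDT_SIZE + 2 MB because the blob is mapped with 2 MB blocks: a blob of at most 2 MB can straddle at most one 2 MB boundary, so two blocks always cover it. A rough standalone sketch of the address arithmetic; FDT_FIXMAP_BASE is a made-up placeholder, and the real mapping is performed by the new fixmap_remap_fdt() declared in asm/mmu.h:

#include <stdint.h>
#include <stdio.h>

#define SZ_2M		0x200000UL
#define MAX_FDT_SIZE	SZ_2M
#define FIX_FDT_SIZE	(MAX_FDT_SIZE + SZ_2M)		/* 4 MB window */

/* Hypothetical virtual base of the FDT fixmap window (2 MB aligned). */
#define FDT_FIXMAP_BASE	0xffffffbffe000000UL

/*
 * Map dt_phys into the fixed 4 MB window: the mapping starts at the
 * 2 MB-aligned address below the blob, and the returned address keeps
 * the blob's offset within that block. Since offset < 2 MB and
 * size <= 2 MB, offset + size < FIX_FDT_SIZE, so the end of the blob
 * always stays inside the window even when it crosses a 2 MB boundary.
 */
static uint64_t fdt_fixmap_virt(uint64_t dt_phys, uint64_t size)
{
	uint64_t block  = dt_phys & ~(SZ_2M - 1);	/* 2 MB-aligned base */
	uint64_t offset = dt_phys - block;

	if (size > MAX_FDT_SIZE)
		return 0;				/* rejected */

	return FDT_FIXMAP_BASE + offset;
}

int main(void)
{
	/* a 2 MB blob starting 8 bytes before a 2 MB boundary still fits */
	printf("%#llx\n", (unsigned long long)
	       fdt_fixmap_virt(0x80000000UL + SZ_2M - 8, SZ_2M));
	return 0;
}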
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index f81b328..30e50eb 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -281,6 +281,7 @@ __AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
#undef __AARCH64_INSN_FUNCS
bool aarch64_insn_is_nop(u32 insn);
+bool aarch64_insn_is_branch_imm(u32 insn);
int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn);
@@ -351,6 +352,8 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
int shift,
enum aarch64_insn_variant variant,
enum aarch64_insn_logic_type type);
+s32 aarch64_get_branch_offset(u32 insn);
+u32 aarch64_set_branch_offset(u32 insn, s32 offset);
bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 7116d39..44be1e0 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -117,10 +117,10 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* ordering rules but do not guarantee any ordering relative to Normal memory
* accesses.
*/
-#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; })
-#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; })
-#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; })
-#define readq_relaxed(c) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(c)); __v; })
+#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
+#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c)))
#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index c4c11d2..3c5fe68 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -132,11 +132,6 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern u64 __vgic_v3_get_ich_vtr_el2(void);
-extern char __save_vgic_v2_state[];
-extern char __restore_vgic_v2_state[];
-extern char __save_vgic_v3_state[];
-extern char __restore_vgic_v3_state[];
-
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f0f58c9..2709db2 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -221,29 +221,6 @@ struct vgic_sr_vectors {
void *restore_vgic;
};
-static inline void vgic_arch_setup(const struct vgic_params *vgic)
-{
- extern struct vgic_sr_vectors __vgic_sr_vectors;
-
- switch(vgic->type)
- {
- case VGIC_V2:
- __vgic_sr_vectors.save_vgic = __save_vgic_v2_state;
- __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
- break;
-
-#ifdef CONFIG_ARM_GIC_V3
- case VGIC_V3:
- __vgic_sr_vectors.save_vgic = __save_vgic_v3_state;
- __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state;
- break;
-#endif
-
- default:
- BUG();
- }
-}
-
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 3d31176..79fcfb0 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -34,5 +34,6 @@ extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot);
+extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
#endif
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index d26d1d5..6471773 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -24,4 +24,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
#endif
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->pc = (__ip); \
+ (regs)->regs[29] = (unsigned long) __builtin_frame_address(0); \
+ (regs)->sp = current_stack_pointer; \
+ (regs)->pstate = PSR_MODE_EL1h; \
+}
+
#endif
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 220633b7..14ad6e4 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -28,12 +28,8 @@
struct mm_struct;
struct cpu_suspend_ctx;
-extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
-extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
-void cpu_soft_restart(phys_addr_t cpu_reset,
- unsigned long addr) __attribute__((noreturn));
extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index d2c37a1..e4c893e 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -78,13 +78,30 @@ struct cpu_context {
struct thread_struct {
struct cpu_context cpu_context; /* cpu context */
- unsigned long tp_value;
+ unsigned long tp_value; /* TLS register */
+#ifdef CONFIG_COMPAT
+ unsigned long tp2_value;
+#endif
struct fpsimd_state fpsimd_state;
unsigned long fault_address; /* fault info */
unsigned long fault_code; /* ESR_EL1 value */
struct debug_info debug; /* debugging */
};
+#ifdef CONFIG_COMPAT
+#define task_user_tls(t) \
+({ \
+ unsigned long *__tls; \
+ if (is_compat_thread(task_thread_info(t))) \
+ __tls = &(t)->thread.tp2_value; \
+ else \
+ __tls = &(t)->thread.tp_value; \
+ __tls; \
+ })
+#else
+#define task_user_tls(t) (&(t)->thread.tp_value)
+#endif
+
#define INIT_THREAD { }
static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index 2454bc5..49d7e1a 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,7 +14,15 @@
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H
-int psci_dt_init(void);
-int psci_acpi_init(void);
+int __init psci_dt_init(void);
+
+#ifdef CONFIG_ACPI
+int __init psci_acpi_init(void);
+bool __init acpi_psci_present(void);
+bool __init acpi_psci_use_hvc(void);
+#else
+static inline int psci_acpi_init(void) { return 0; }
+static inline bool acpi_psci_present(void) { return false; }
+#endif
#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index bf22650..db02be8 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -42,7 +42,7 @@ extern void handle_IPI(int ipinr, struct pt_regs *regs);
* Discover the set of possible CPUs and determine their
* SMP operations.
*/
-extern void of_smp_init_cpus(void);
+extern void smp_init_cpus(void);
/*
* Provide a function to raise an IPI cross call on CPUs in callmap.
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 8dcd61e..7abf757 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -19,6 +19,8 @@
#ifndef __ASM_SMP_PLAT_H
#define __ASM_SMP_PLAT_H
+#include <linux/cpumask.h>
+
#include <asm/types.h>
struct mpidr_hash {
@@ -39,6 +41,20 @@ static inline u32 mpidr_hash_size(void)
*/
extern u64 __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+/*
+ * Retrieve logical cpu index corresponding to a given MPIDR.Aff*
+ * - mpidr: MPIDR.Aff* bits to be used for the look-up
+ *
+ * Returns the cpu logical index or -EINVAL on look-up error
+ */
+static inline int get_logical_index(u64 mpidr)
+{
+ int cpu;
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ if (cpu_logical_map(cpu) == mpidr)
+ return cpu;
+ return -EINVAL;
+}
void __init do_post_cpus_up_work(void);
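get_logical_index() is the inverse of cpu_logical_map(): it scans the logical map for a matching MPIDR.Aff* value, and is used later in this series by psci_init_migrate() to locate the CPU a Trusted OS is resident on. A standalone model of the lookup; the four-entry map below is invented for the example:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS	4
#define EINVAL	22

/* Stand-in for __cpu_logical_map[]: logical cpu -> MPIDR.Aff* bits */
static uint64_t cpu_logical_map[NR_CPUS] = { 0x000, 0x001, 0x100, 0x101 };

static int get_logical_index(uint64_t mpidr)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_logical_map[cpu] == mpidr)
			return cpu;
	return -EINVAL;
}

int main(void)
{
	printf("%d\n", get_logical_index(0x100));	/* 2, second cluster */
	printf("%d\n", get_logical_index(0x300));	/* -22, no such CPU  */
	return 0;
}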
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 003802f..59a5b0f1 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -21,6 +21,6 @@ struct sleep_save_sp {
phys_addr_t save_ptr_stash_phys;
};
-extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
+extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
extern void cpu_resume(void);
#endif
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 7a18fab..57f110b 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -23,6 +23,8 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
+#include <linux/signal.h>
+#include <linux/ratelimit.h>
#include <linux/reboot.h>
struct pt_regs;
@@ -41,9 +43,19 @@ struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
-void soft_restart(unsigned long);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+#define show_unhandled_signals_ratelimited() \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ bool __show_ratelimited = false; \
+ if (show_unhandled_signals && __ratelimit(&_rs)) \
+ __show_ratelimited = true; \
+ __show_ratelimited; \
+})
+
#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index c3bb05b..934815d 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -28,8 +28,6 @@
* TLB Management
* ==============
*
- * The arch/arm64/mm/tlb.S files implement these methods.
- *
* The TLB specific code is expected to perform whatever tests it needs
* to determine if it should invalidate the TLB for each call. Start
* addresses are inclusive and end addresses are exclusive; it is safe to
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 8b83955..19de753 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -36,12 +36,6 @@ EXPORT_SYMBOL(acpi_disabled);
int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);
-/* Processors with enabled flag and sane MPIDR */
-static int enabled_cpus;
-
-/* Boot CPU is valid or not in MADT */
-static bool bootcpu_valid __initdata;
-
static bool param_acpi_off __initdata;
static bool param_acpi_force __initdata;
@@ -95,122 +89,15 @@ void __init __acpi_unmap_table(char *map, unsigned long size)
early_memunmap(map, size);
}
-/**
- * acpi_map_gic_cpu_interface - generates a logical cpu number
- * and map to MPIDR represented by GICC structure
- */
-static void __init
-acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
+bool __init acpi_psci_present(void)
{
- int i;
- u64 mpidr = processor->arm_mpidr & MPIDR_HWID_BITMASK;
- bool enabled = !!(processor->flags & ACPI_MADT_ENABLED);
-
- if (mpidr == INVALID_HWID) {
- pr_info("Skip MADT cpu entry with invalid MPIDR\n");
- return;
- }
-
- total_cpus++;
- if (!enabled)
- return;
-
- if (enabled_cpus >= NR_CPUS) {
- pr_warn("NR_CPUS limit of %d reached, Processor %d/0x%llx ignored.\n",
- NR_CPUS, total_cpus, mpidr);
- return;
- }
-
- /* Check if GICC structure of boot CPU is available in the MADT */
- if (cpu_logical_map(0) == mpidr) {
- if (bootcpu_valid) {
- pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
- mpidr);
- return;
- }
-
- bootcpu_valid = true;
- }
-
- /*
- * Duplicate MPIDRs are a recipe for disaster. Scan
- * all initialized entries and check for
- * duplicates. If any is found just ignore the CPU.
- */
- for (i = 1; i < enabled_cpus; i++) {
- if (cpu_logical_map(i) == mpidr) {
- pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
- mpidr);
- return;
- }
- }
-
- if (!acpi_psci_present())
- return;
-
- cpu_ops[enabled_cpus] = cpu_get_ops("psci");
- /* CPU 0 was already initialized */
- if (enabled_cpus) {
- if (!cpu_ops[enabled_cpus])
- return;
-
- if (cpu_ops[enabled_cpus]->cpu_init(NULL, enabled_cpus))
- return;
-
- /* map the logical cpu id to cpu MPIDR */
- cpu_logical_map(enabled_cpus) = mpidr;
- }
-
- enabled_cpus++;
+ return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
}
-static int __init
-acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
- const unsigned long end)
+/* Whether HVC must be used instead of SMC as the PSCI conduit */
+bool __init acpi_psci_use_hvc(void)
{
- struct acpi_madt_generic_interrupt *processor;
-
- processor = (struct acpi_madt_generic_interrupt *)header;
-
- if (BAD_MADT_ENTRY(processor, end))
- return -EINVAL;
-
- acpi_table_print_madt_entry(header);
- acpi_map_gic_cpu_interface(processor);
- return 0;
-}
-
-/* Parse GIC cpu interface entries in MADT for SMP init */
-void __init acpi_init_cpus(void)
-{
- int count, i;
-
- /*
- * do a partial walk of MADT to determine how many CPUs
- * we have including disabled CPUs, and get information
- * we need for SMP init
- */
- count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
- acpi_parse_gic_cpu_interface, 0);
-
- if (!count) {
- pr_err("No GIC CPU interface entries present\n");
- return;
- } else if (count < 0) {
- pr_err("Error parsing GIC CPU interface entry\n");
- return;
- }
-
- if (!bootcpu_valid) {
- pr_err("MADT missing boot CPU MPIDR, not enabling secondaries\n");
- return;
- }
-
- for (i = 0; i < enabled_cpus; i++)
- set_cpu_possible(i, true);
-
- /* Make boot-up look pretty */
- pr_info("%d CPUs enabled, %d CPUs total\n", enabled_cpus, total_cpus);
+ return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
}
/*
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 28f8365..221b983 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -24,8 +24,13 @@
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
+#include <asm/insn.h>
#include <linux/stop_machine.h>
+#define __ALT_PTR(a,f) (u32 *)((void *)&(a)->f + (a)->f)
+#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
+#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
+
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
struct alt_region {
@@ -33,13 +38,63 @@ struct alt_region {
struct alt_instr *end;
};
+/*
+ * Check if the target PC is within an alternative block.
+ */
+static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
+{
+ unsigned long replptr;
+
+ if (kernel_text_address(pc))
+ return 1;
+
+ replptr = (unsigned long)ALT_REPL_PTR(alt);
+ if (pc >= replptr && pc <= (replptr + alt->alt_len))
+ return 0;
+
+ /*
+ * Branching into *another* alternate sequence is doomed, and
+ * we're not even trying to fix it up.
+ */
+ BUG();
+}
+
+static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
+{
+ u32 insn;
+
+ insn = le32_to_cpu(*altinsnptr);
+
+ if (aarch64_insn_is_branch_imm(insn)) {
+ s32 offset = aarch64_get_branch_offset(insn);
+ unsigned long target;
+
+ target = (unsigned long)altinsnptr + offset;
+
+ /*
+ * If we're branching inside the alternate sequence,
+ * do not rewrite the instruction, as it is already
+ * correct. Otherwise, generate the new instruction.
+ */
+ if (branch_insn_requires_update(alt, target)) {
+ offset = target - (unsigned long)insnptr;
+ insn = aarch64_set_branch_offset(insn, offset);
+ }
+ }
+
+ return insn;
+}
+
static int __apply_alternatives(void *alt_region)
{
struct alt_instr *alt;
struct alt_region *region = alt_region;
- u8 *origptr, *replptr;
+ u32 *origptr, *replptr;
for (alt = region->begin; alt < region->end; alt++) {
+ u32 insn;
+ int i, nr_inst;
+
if (!cpus_have_cap(alt->cpufeature))
continue;
@@ -47,11 +102,17 @@ static int __apply_alternatives(void *alt_region)
pr_info_once("patching kernel code\n");
- origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
- replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
- memcpy(origptr, replptr, alt->alt_len);
+ origptr = ALT_ORIG_PTR(alt);
+ replptr = ALT_REPL_PTR(alt);
+ nr_inst = alt->alt_len / sizeof(insn);
+
+ for (i = 0; i < nr_inst; i++) {
+ insn = get_alt_insn(alt, origptr + i, replptr + i);
+ *(origptr + i) = cpu_to_le32(insn);
+ }
+
flush_icache_range((uintptr_t)origptr,
- (uintptr_t)(origptr + alt->alt_len));
+ (uintptr_t)(origptr + nr_inst));
}
return 0;
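An instruction copied out of .altinstr_replacement keeps its encoded PC-relative offset, so a branch inside it would resolve to the wrong address once the instruction sits at its final location. get_alt_insn() therefore re-encodes the offset against the destination, unless the target lies inside the replacement sequence itself. The underlying arithmetic, shown as a standalone sketch with made-up addresses:

#include <stdint.h>
#include <stdio.h>

/*
 * An instruction at 'src' that branches to absolute address 'target'
 * encodes offset (target - src). When it is copied to 'dst', keeping
 * the same absolute target requires re-encoding the offset as
 * (target - dst), which is what get_alt_insn() does through
 * aarch64_get_branch_offset()/aarch64_set_branch_offset().
 */
int main(void)
{
	uint64_t src    = 0xffff000000200000ULL; /* in .altinstr_replacement */
	uint64_t dst    = 0xffff000000081000ULL; /* final location in .text  */
	uint64_t target = 0xffff0000000810c0ULL; /* branch target            */

	int64_t old_offset = (int64_t)(target - src);
	int64_t new_offset = (int64_t)(target - dst);

	printf("offset at source:      %lld\n", (long long)old_offset);
	printf("offset after patching: %lld\n", (long long)new_offset);
	printf("both resolve to:       %#llx\n",
	       (unsigned long long)(dst + new_offset));
	return 0;
}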
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index da675cc..c99701a 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -127,7 +127,6 @@ int main(void)
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic));
DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic));
- DEFINE(VGIC_SR_VECTOR_SZ, sizeof(struct vgic_sr_vectors));
DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index fb8ff9b..5ea337d 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -16,11 +16,13 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <asm/cpu_ops.h>
-#include <asm/smp_plat.h>
+#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/string.h>
+#include <asm/acpi.h>
+#include <asm/cpu_ops.h>
+#include <asm/smp_plat.h>
extern const struct cpu_operations smp_spin_table_ops;
extern const struct cpu_operations cpu_psci_ops;
@@ -35,7 +37,7 @@ static const struct cpu_operations *supported_cpu_ops[] __initconst = {
NULL,
};
-const struct cpu_operations * __init cpu_get_ops(const char *name)
+static const struct cpu_operations * __init cpu_get_ops(const char *name)
{
const struct cpu_operations **ops = supported_cpu_ops;
@@ -49,39 +51,53 @@ const struct cpu_operations * __init cpu_get_ops(const char *name)
return NULL;
}
+static const char *__init cpu_read_enable_method(int cpu)
+{
+ const char *enable_method;
+
+ if (acpi_disabled) {
+ struct device_node *dn = of_get_cpu_node(cpu, NULL);
+
+ if (!dn) {
+ if (!cpu)
+ pr_err("Failed to find device node for boot cpu\n");
+ return NULL;
+ }
+
+ enable_method = of_get_property(dn, "enable-method", NULL);
+ if (!enable_method) {
+ /*
+ * The boot CPU may not have an enable method (e.g.
+ * when spin-table is used for secondaries).
+ * Don't warn spuriously.
+ */
+ if (cpu != 0)
+ pr_err("%s: missing enable-method property\n",
+ dn->full_name);
+ }
+ } else {
+ enable_method = acpi_get_enable_method(cpu);
+ if (!enable_method)
+ pr_err("Unsupported ACPI enable-method\n");
+ }
+
+ return enable_method;
+}
/*
- * Read a cpu's enable method from the device tree and record it in cpu_ops.
+ * Read a cpu's enable method and record it in cpu_ops.
*/
-int __init cpu_read_ops(struct device_node *dn, int cpu)
+int __init cpu_read_ops(int cpu)
{
- const char *enable_method = of_get_property(dn, "enable-method", NULL);
- if (!enable_method) {
- /*
- * The boot CPU may not have an enable method (e.g. when
- * spin-table is used for secondaries). Don't warn spuriously.
- */
- if (cpu != 0)
- pr_err("%s: missing enable-method property\n",
- dn->full_name);
- return -ENOENT;
- }
+ const char *enable_method = cpu_read_enable_method(cpu);
+
+ if (!enable_method)
+ return -ENODEV;
cpu_ops[cpu] = cpu_get_ops(enable_method);
if (!cpu_ops[cpu]) {
- pr_warn("%s: unsupported enable-method property: %s\n",
- dn->full_name, enable_method);
+ pr_warn("Unsupported enable-method: %s\n", enable_method);
return -EOPNOTSUPP;
}
return 0;
}
-
-void __init cpu_read_bootcpu_ops(void)
-{
- struct device_node *dn = of_get_cpu_node(0, NULL);
- if (!dn) {
- pr_err("Failed to find device node for boot cpu\n");
- return;
- }
- cpu_read_ops(dn, 0);
-}
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 3d9967e..5ad86ce 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -22,7 +22,23 @@
#include <asm/cpu.h>
#include <asm/cpufeature.h>
+static bool
+has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
+{
+ u64 val;
+
+ val = read_cpuid(id_aa64pfr0_el1);
+ return (val & entry->register_mask) == entry->register_value;
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
+ {
+ .desc = "GIC system register CPU interface",
+ .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
+ .matches = has_id_aa64pfr0_feature,
+ .register_mask = (0xf << 24),
+ .register_value = (1 << 24),
+ },
{},
};
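The new match helper masks a single 4-bit field out of ID_AA64PFR0_EL1: bits [27:24] describe the GIC system register interface, and a field value of 1 means the sysreg CPU interface is present. A standalone illustration of the mask/value test; the register value below is invented for the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical ID_AA64PFR0_EL1 value with GIC field (bits 27:24) = 1 */
	uint64_t id_aa64pfr0 = 0x0000000001002222ULL;

	uint64_t mask  = 0xfULL << 24;	/* .register_mask  in the patch */
	uint64_t value = 0x1ULL << 24;	/* .register_value in the patch */

	if ((id_aa64pfr0 & mask) == value)
		printf("ARM64_HAS_SYSREG_GIC_CPUIF would be set\n");
	else
		printf("capability not present\n");
	return 0;
}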
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index a78143a..7ce589c 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -18,15 +18,10 @@
int arm_cpuidle_init(unsigned int cpu)
{
int ret = -EOPNOTSUPP;
- struct device_node *cpu_node = of_cpu_device_node_get(cpu);
-
- if (!cpu_node)
- return -ENODEV;
if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
- ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu);
+ ret = cpu_ops[cpu]->cpu_init_idle(cpu);
- of_node_put(cpu_node);
return ret;
}
@@ -37,7 +32,7 @@ int arm_cpuidle_init(unsigned int cpu)
* Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
* operations back-end error code otherwise.
*/
-int cpu_suspend(unsigned long arg)
+int arm_cpuidle_suspend(int index)
{
int cpu = smp_processor_id();
@@ -47,5 +42,5 @@ int cpu_suspend(unsigned long arg)
*/
if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
return -EOPNOTSUPP;
- return cpu_ops[cpu]->cpu_suspend(arg);
+ return cpu_ops[cpu]->cpu_suspend(index);
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 959fe87..a7691a3 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -21,7 +21,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
@@ -124,21 +124,24 @@
msr sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
- alternative_insn \
- "nop", \
- "tbz x22, #4, 1f", \
- ARM64_WORKAROUND_845719
+
+#undef SEQUENCE_ORG
+#undef SEQUENCE_ALT
+
#ifdef CONFIG_PID_IN_CONTEXTIDR
- alternative_insn \
- "nop; nop", \
- "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \
- ARM64_WORKAROUND_845719
+
+#define SEQUENCE_ORG "nop ; nop ; nop"
+#define SEQUENCE_ALT "tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
+
#else
- alternative_insn \
- "nop", \
- "msr contextidr_el1, xzr; 1:", \
- ARM64_WORKAROUND_845719
+
+#define SEQUENCE_ORG "nop ; nop"
+#define SEQUENCE_ALT "tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
+
#endif
+
+ alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
+
#endif
.endif
msr elr_el1, x21 // set up the return data
@@ -517,6 +520,7 @@ el0_sp_pc:
mrs x26, far_el1
// enable interrupts before calling the main handler
enable_dbg_and_irq
+ ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
@@ -608,11 +612,16 @@ ENDPROC(cpu_switch_to)
*/
ret_fast_syscall:
disable_irq // disable interrupts
- ldr x1, [tsk, #TI_FLAGS]
+ ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing
+ and x2, x1, #_TIF_SYSCALL_WORK
+ cbnz x2, ret_fast_syscall_trace
and x2, x1, #_TIF_WORK_MASK
cbnz x2, fast_work_pending
enable_step_tsk x1, x2
kernel_exit 0, ret = 1
+ret_fast_syscall_trace:
+ enable_irq // enable interrupts
+ b __sys_trace_return
/*
* Ok, we need to do extra processing, enter the slow path.
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 3dca156..44d6f75 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -296,6 +297,35 @@ static void fpsimd_pm_init(void)
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
+#ifdef CONFIG_HOTPLUG_CPU
+static int fpsimd_cpu_hotplug_notifier(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ per_cpu(fpsimd_last_state, cpu) = NULL;
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fpsimd_cpu_hotplug_notifier_block = {
+ .notifier_call = fpsimd_cpu_hotplug_notifier,
+};
+
+static inline void fpsimd_hotplug_init(void)
+{
+ register_cpu_notifier(&fpsimd_cpu_hotplug_notifier_block);
+}
+
+#else
+static inline void fpsimd_hotplug_init(void) { }
+#endif
+
/*
* FP/SIMD support code initialisation.
*/
@@ -315,6 +345,7 @@ static int __init fpsimd_init(void)
elf_hwcap |= HWCAP_ASIMD;
fpsimd_pm_init();
+ fpsimd_hotplug_init();
return 0;
}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 19f915e..c0ff3ce 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -237,8 +237,6 @@ ENTRY(stext)
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
adrp x24, __PHYS_OFFSET
bl set_cpu_boot_mode_flag
-
- bl __vet_fdt
bl __create_page_tables // x25=TTBR0, x26=TTBR1
/*
* The following calls CPU setup code, see arch/arm64/mm/proc.S for
@@ -270,24 +268,6 @@ preserve_boot_args:
ENDPROC(preserve_boot_args)
/*
- * Determine validity of the x21 FDT pointer.
- * The dtb must be 8-byte aligned and live in the first 512M of memory.
- */
-__vet_fdt:
- tst x21, #0x7
- b.ne 1f
- cmp x21, x24
- b.lt 1f
- mov x0, #(1 << 29)
- add x0, x0, x24
- cmp x21, x0
- b.ge 1f
- ret
-1:
- mov x21, #0
- ret
-ENDPROC(__vet_fdt)
-/*
* Macro to create a table entry to the next page.
*
* tbl: page table address
@@ -348,8 +328,7 @@ ENDPROC(__vet_fdt)
* required to get the kernel running. The following sections are required:
* - identity mapping to enable the MMU (low address, TTBR0)
* - first few MB of the kernel linear mapping to jump to once the MMU has
- * been enabled, including the FDT blob (TTBR1)
- * - pgd entry for fixed mappings (TTBR1)
+ * been enabled
*/
__create_page_tables:
adrp x25, idmap_pg_dir
@@ -382,7 +361,7 @@ __create_page_tables:
* Create the identity mapping.
*/
mov x0, x25 // idmap_pg_dir
- adrp x3, KERNEL_START // __pa(KERNEL_START)
+ adrp x3, __idmap_text_start // __pa(__idmap_text_start)
#ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
@@ -405,11 +384,11 @@ __create_page_tables:
/*
* Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
- * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used),
+ * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
* this number conveniently equals the number of leading zeroes in
- * the physical address of KERNEL_END.
+ * the physical address of __idmap_text_end.
*/
- adrp x5, KERNEL_END
+ adrp x5, __idmap_text_end
clz x5, x5
cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
b.ge 1f // .. then skip additional level
@@ -424,8 +403,8 @@ __create_page_tables:
#endif
create_pgd_entry x0, x3, x5, x6
- mov x5, x3 // __pa(KERNEL_START)
- adr_l x6, KERNEL_END // __pa(KERNEL_END)
+ mov x5, x3 // __pa(__idmap_text_start)
+ adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
create_block_map x0, x7, x3, x5, x6
/*
@@ -439,22 +418,6 @@ __create_page_tables:
create_block_map x0, x7, x3, x5, x6
/*
- * Map the FDT blob (maximum 2MB; must be within 512MB of
- * PHYS_OFFSET).
- */
- mov x3, x21 // FDT phys address
- and x3, x3, #~((1 << 21) - 1) // 2MB aligned
- mov x6, #PAGE_OFFSET
- sub x5, x3, x24 // subtract PHYS_OFFSET
- tst x5, #~((1 << 29) - 1) // within 512MB?
- csel x21, xzr, x21, ne // zero the FDT pointer
- b.ne 1f
- add x5, x5, x6 // __va(FDT blob)
- add x6, x5, #1 << 21 // 2MB for the FDT blob
- sub x6, x6, #1 // inclusive range
- create_block_map x0, x7, x3, x5, x6
-1:
- /*
* Since the page tables have been populated with non-cacheable
* accesses (MMU disabled), invalidate the idmap and swapper page
* tables again to remove any speculatively loaded cache lines.
@@ -669,6 +632,7 @@ ENDPROC(__secondary_switched)
*
* other registers depend on the function called upon completion
*/
+ .section ".idmap.text", "ax"
__enable_mmu:
ldr x5, =vectors
msr vbar_el1, x5
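The updated comment notes that T0SZ == (64 - #bits used), which for the identity map is simply the number of leading zeroes in the physical address of __idmap_text_end, exactly what the clz in the hunk above computes. A standalone model of that calculation, using a made-up address:

#include <stdint.h>
#include <stdio.h>

/*
 * T0SZ = 64 - (number of address bits needed). For a non-zero address
 * this equals the count of leading zeroes of the highest physical
 * address that the identity map must cover.
 */
int main(void)
{
	/* Hypothetical physical address of __idmap_text_end */
	uint64_t idmap_text_end = 0x0000008000620000ULL;

	int t0sz = __builtin_clzll(idmap_text_end);

	printf("leading zeroes / maximum allowed T0SZ: %d\n", t0sz);
	printf("ID map must cover %d address bits\n", 64 - t0sz);
	return 0;
}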
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 9249020..dd9671c 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -77,6 +77,14 @@ bool __kprobes aarch64_insn_is_nop(u32 insn)
}
}
+bool aarch64_insn_is_branch_imm(u32 insn)
+{
+ return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
+ aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn));
+}
+
static DEFINE_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap)
@@ -1057,6 +1065,58 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
+/*
+ * Decode the imm field of a branch, and return the byte offset as a
+ * signed value (so it can be used when computing a new branch
+ * target).
+ */
+s32 aarch64_get_branch_offset(u32 insn)
+{
+ s32 imm;
+
+ if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
+ return (imm << 6) >> 4;
+ }
+
+ if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn)) {
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
+ return (imm << 13) >> 11;
+ }
+
+ if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
+ return (imm << 18) >> 16;
+ }
+
+ /* Unhandled instruction */
+ BUG();
+}
+
+/*
+ * Encode the displacement of a branch in the imm field and return the
+ * updated instruction.
+ */
+u32 aarch64_set_branch_offset(u32 insn, s32 offset)
+{
+ if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+ offset >> 2);
+
+ if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn))
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+ offset >> 2);
+
+ if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
+ offset >> 2);
+
+ /* Unhandled instruction */
+ BUG();
+}
+
bool aarch32_insn_is_wide(u32 insn)
{
return insn >= 0xe800;
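aarch64_get_branch_offset() turns the raw immediate back into a signed byte offset with a shift pair: for B/BL, (imm << 6) moves the sign bit of the 26-bit field up to bit 31, and the arithmetic shift right by 4 then sign-extends and multiplies by 4 in one step, since branch offsets are counted in 4-byte words. A standalone check of that identity for the 26-bit case:

#include <stdint.h>
#include <stdio.h>

/*
 * Sign-extend a 26-bit word offset and convert it to a byte offset,
 * exactly as "(imm << 6) >> 4" does in aarch64_get_branch_offset().
 */
static int32_t b_imm26_to_bytes(uint32_t imm26)
{
	return ((int32_t)(imm26 << 6)) >> 4;
}

int main(void)
{
	/* imm26 = 0x3fffffe encodes -2 words, i.e. a branch 8 bytes back */
	printf("%d\n", b_imm26_to_bytes(0x3fffffeu));	/* prints -8 */

	/* imm26 = 0x10 encodes +16 words, i.e. 64 bytes forward */
	printf("%d\n", b_imm26_to_bytes(0x10u));	/* prints 64 */
	return 0;
}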
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cce18c8..702591f 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -488,7 +488,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
}
err = request_irq(irq, armpmu->handle_irq,
- IRQF_NOBALANCING,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
"arm-pmu", armpmu);
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index c6b1f3b..223b093 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -58,14 +58,6 @@ unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
-void soft_restart(unsigned long addr)
-{
- setup_mm_for_reboot();
- cpu_soft_restart(virt_to_phys(cpu_reset), addr);
- /* Should never get here */
- BUG();
-}
-
/*
* Function pointers to optional machine specific functions
*/
@@ -136,9 +128,7 @@ void machine_power_off(void)
/*
* Restart requires that the secondary CPUs stop performing any activity
- * while the primary CPU resets the system. Systems with a single CPU can
- * use soft_restart() as their machine descriptor's .restart hook, since that
- * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * while the primary CPU resets the system. Systems with multiple CPUs must
* provide a HW restart implementation, to ensure that all CPUs reset at once.
* This is required so that any code running after reset on the primary CPU
* doesn't have to co-ordinate with other CPUs to ensure they aren't still
@@ -243,7 +233,8 @@ void release_thread(struct task_struct *dead_task)
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- fpsimd_preserve_current_state();
+ if (current->mm)
+ fpsimd_preserve_current_state();
*dst = *src;
return 0;
}
@@ -254,35 +245,35 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
unsigned long stk_sz, struct task_struct *p)
{
struct pt_regs *childregs = task_pt_regs(p);
- unsigned long tls = p->thread.tp_value;
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->regs[0] = 0;
- if (is_compat_thread(task_thread_info(p))) {
- if (stack_start)
+
+ /*
+ * Read the current TLS pointer from tpidr_el0 as it may be
+ * out-of-sync with the saved value.
+ */
+ asm("mrs %0, tpidr_el0" : "=r" (*task_user_tls(p)));
+
+ if (stack_start) {
+ if (is_compat_thread(task_thread_info(p)))
childregs->compat_sp = stack_start;
- } else {
- /*
- * Read the current TLS pointer from tpidr_el0 as it may be
- * out-of-sync with the saved value.
- */
- asm("mrs %0, tpidr_el0" : "=r" (tls));
- if (stack_start) {
- /* 16-byte aligned stack mandatory on AArch64 */
- if (stack_start & 15)
- return -EINVAL;
+ /* 16-byte aligned stack mandatory on AArch64 */
+ else if (stack_start & 15)
+ return -EINVAL;
+ else
childregs->sp = stack_start;
- }
}
+
/*
* If a TLS pointer was passed to clone (4th argument), use it
* for the new thread.
*/
if (clone_flags & CLONE_SETTLS)
- tls = childregs->regs[3];
+ p->thread.tp_value = childregs->regs[3];
} else {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h;
@@ -291,7 +282,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
}
p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
p->thread.cpu_context.sp = (unsigned long)childregs;
- p->thread.tp_value = tls;
ptrace_hw_copy_thread(p);
@@ -302,18 +292,12 @@ static void tls_thread_switch(struct task_struct *next)
{
unsigned long tpidr, tpidrro;
- if (!is_compat_task()) {
- asm("mrs %0, tpidr_el0" : "=r" (tpidr));
- current->thread.tp_value = tpidr;
- }
+ asm("mrs %0, tpidr_el0" : "=r" (tpidr));
+ *task_user_tls(current) = tpidr;
- if (is_compat_thread(task_thread_info(next))) {
- tpidr = 0;
- tpidrro = next->thread.tp_value;
- } else {
- tpidr = next->thread.tp_value;
- tpidrro = 0;
- }
+ tpidr = *task_user_tls(next);
+ tpidrro = is_compat_thread(task_thread_info(next)) ?
+ next->thread.tp_value : 0;
asm(
" msr tpidr_el0, %0\n"
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index ea18cb5..869f202 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -15,7 +15,6 @@
#define pr_fmt(fmt) "psci: " fmt
-#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
@@ -25,8 +24,8 @@
#include <linux/slab.h>
#include <uapi/linux/psci.h>
-#include <asm/acpi.h>
#include <asm/compiler.h>
+#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/psci.h>
@@ -37,16 +36,31 @@
#define PSCI_POWER_STATE_TYPE_STANDBY 0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
-struct psci_power_state {
- u16 id;
- u8 type;
- u8 affinity_level;
-};
+static bool psci_power_state_loses_context(u32 state)
+{
+ return state & PSCI_0_2_POWER_STATE_TYPE_MASK;
+}
+
+static bool psci_power_state_is_valid(u32 state)
+{
+ const u32 valid_mask = PSCI_0_2_POWER_STATE_ID_MASK |
+ PSCI_0_2_POWER_STATE_TYPE_MASK |
+ PSCI_0_2_POWER_STATE_AFFL_MASK;
+
+ return !(state & ~valid_mask);
+}
+
+/*
+ * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
+ * calls to its resident CPU, so we must avoid issuing those. We never migrate
+ * a Trusted OS even if it claims to be capable of migration -- doing so will
+ * require cooperation with a Trusted OS driver.
+ */
+static int resident_cpu = -1;
struct psci_operations {
- int (*cpu_suspend)(struct psci_power_state state,
- unsigned long entry_point);
- int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_suspend)(u32 state, unsigned long entry_point);
+ int (*cpu_off)(u32 state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
int (*migrate)(unsigned long cpuid);
int (*affinity_info)(unsigned long target_affinity,
@@ -56,23 +70,21 @@ struct psci_operations {
static struct psci_operations psci_ops;
-static int (*invoke_psci_fn)(u64, u64, u64, u64);
-typedef int (*psci_initcall_t)(const struct device_node *);
-
-asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64);
-asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
+typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+ unsigned long, unsigned long);
+asmlinkage psci_fn __invoke_psci_fn_hvc;
+asmlinkage psci_fn __invoke_psci_fn_smc;
+static psci_fn *invoke_psci_fn;
enum psci_function {
PSCI_FN_CPU_SUSPEND,
PSCI_FN_CPU_ON,
PSCI_FN_CPU_OFF,
PSCI_FN_MIGRATE,
- PSCI_FN_AFFINITY_INFO,
- PSCI_FN_MIGRATE_INFO_TYPE,
PSCI_FN_MAX,
};
-static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state);
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
static u32 psci_function_id[PSCI_FN_MAX];
@@ -92,56 +104,28 @@ static int psci_to_linux_errno(int errno)
return -EINVAL;
}
-static u32 psci_power_state_pack(struct psci_power_state state)
-{
- return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
- & PSCI_0_2_POWER_STATE_ID_MASK) |
- ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
- & PSCI_0_2_POWER_STATE_TYPE_MASK) |
- ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
- & PSCI_0_2_POWER_STATE_AFFL_MASK);
-}
-
-static void psci_power_state_unpack(u32 power_state,
- struct psci_power_state *state)
-{
- state->id = (power_state & PSCI_0_2_POWER_STATE_ID_MASK) >>
- PSCI_0_2_POWER_STATE_ID_SHIFT;
- state->type = (power_state & PSCI_0_2_POWER_STATE_TYPE_MASK) >>
- PSCI_0_2_POWER_STATE_TYPE_SHIFT;
- state->affinity_level =
- (power_state & PSCI_0_2_POWER_STATE_AFFL_MASK) >>
- PSCI_0_2_POWER_STATE_AFFL_SHIFT;
-}
-
-static int psci_get_version(void)
+static u32 psci_get_version(void)
{
- int err;
-
- err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
- return err;
+ return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
-static int psci_cpu_suspend(struct psci_power_state state,
- unsigned long entry_point)
+static int psci_cpu_suspend(u32 state, unsigned long entry_point)
{
int err;
- u32 fn, power_state;
+ u32 fn;
fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
- power_state = psci_power_state_pack(state);
- err = invoke_psci_fn(fn, power_state, entry_point, 0);
+ err = invoke_psci_fn(fn, state, entry_point, 0);
return psci_to_linux_errno(err);
}
-static int psci_cpu_off(struct psci_power_state state)
+static int psci_cpu_off(u32 state)
{
int err;
- u32 fn, power_state;
+ u32 fn;
fn = psci_function_id[PSCI_FN_CPU_OFF];
- power_state = psci_power_state_pack(state);
- err = invoke_psci_fn(fn, power_state, 0, 0);
+ err = invoke_psci_fn(fn, state, 0, 0);
return psci_to_linux_errno(err);
}
@@ -168,30 +152,29 @@ static int psci_migrate(unsigned long cpuid)
static int psci_affinity_info(unsigned long target_affinity,
unsigned long lowest_affinity_level)
{
- int err;
- u32 fn;
-
- fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
- err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
- return err;
+ return invoke_psci_fn(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity,
+ lowest_affinity_level, 0);
}
static int psci_migrate_info_type(void)
{
- int err;
- u32 fn;
+ return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
+}
- fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
- err = invoke_psci_fn(fn, 0, 0, 0);
- return err;
+static unsigned long psci_migrate_info_up_cpu(void)
+{
+ return invoke_psci_fn(PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, 0, 0, 0);
}
-static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
- unsigned int cpu)
+static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
{
int i, ret, count = 0;
- struct psci_power_state *psci_states;
- struct device_node *state_node;
+ u32 *psci_states;
+ struct device_node *state_node, *cpu_node;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (!cpu_node)
+ return -ENODEV;
/*
* If the PSCI cpu_suspend function hook has not been initialized
@@ -215,13 +198,13 @@ static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
return -ENOMEM;
for (i = 0; i < count; i++) {
- u32 psci_power_state;
+ u32 state;
state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
ret = of_property_read_u32(state_node,
"arm,psci-suspend-param",
- &psci_power_state);
+ &state);
if (ret) {
pr_warn(" * %s missing arm,psci-suspend-param property\n",
state_node->full_name);
@@ -230,9 +213,13 @@ static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
}
of_node_put(state_node);
- pr_debug("psci-power-state %#x index %d\n", psci_power_state,
- i);
- psci_power_state_unpack(psci_power_state, &psci_states[i]);
+ pr_debug("psci-power-state %#x index %d\n", state, i);
+ if (!psci_power_state_is_valid(state)) {
+ pr_warn("Invalid PSCI power state %#x\n", state);
+ ret = -EINVAL;
+ goto free_mem;
+ }
+ psci_states[i] = state;
}
/* Idle states parsed correctly, initialize per-cpu pointer */
per_cpu(psci_power_state, cpu) = psci_states;
@@ -275,6 +262,46 @@ static void psci_sys_poweroff(void)
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
+/*
+ * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
+ * return DENIED (which would be fatal).
+ */
+static void __init psci_init_migrate(void)
+{
+ unsigned long cpuid;
+ int type, cpu;
+
+ type = psci_ops.migrate_info_type();
+
+ if (type == PSCI_0_2_TOS_MP) {
+ pr_info("Trusted OS migration not required\n");
+ return;
+ }
+
+ if (type == PSCI_RET_NOT_SUPPORTED) {
+ pr_info("MIGRATE_INFO_TYPE not supported.\n");
+ return;
+ }
+
+ if (type != PSCI_0_2_TOS_UP_MIGRATE &&
+ type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
+ pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+ return;
+ }
+
+ cpuid = psci_migrate_info_up_cpu();
+ if (cpuid & ~MPIDR_HWID_BITMASK) {
+ pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
+ cpuid);
+ return;
+ }
+
+ cpu = get_logical_index(cpuid);
+ resident_cpu = cpu >= 0 ? cpu : -1;
+
+ pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+}
+
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
@@ -290,11 +317,8 @@ static void __init psci_0_2_set_functions(void)
psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
psci_ops.migrate = psci_migrate;
- psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO;
psci_ops.affinity_info = psci_affinity_info;
- psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
- PSCI_0_2_FN_MIGRATE_INFO_TYPE;
psci_ops.migrate_info_type = psci_migrate_info_type;
arm_pm_restart = psci_sys_reset;
@@ -307,32 +331,26 @@ static void __init psci_0_2_set_functions(void)
*/
static int __init psci_probe(void)
{
- int ver = psci_get_version();
-
- if (ver == PSCI_RET_NOT_SUPPORTED) {
- /*
- * PSCI versions >=0.2 mandates implementation of
- * PSCI_VERSION.
- */
- pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
- return -EOPNOTSUPP;
- } else {
- pr_info("PSCIv%d.%d detected in firmware.\n",
- PSCI_VERSION_MAJOR(ver),
- PSCI_VERSION_MINOR(ver));
-
- if (PSCI_VERSION_MAJOR(ver) == 0 &&
- PSCI_VERSION_MINOR(ver) < 2) {
- pr_err("Conflicting PSCI version detected.\n");
- return -EINVAL;
- }
+ u32 ver = psci_get_version();
+
+ pr_info("PSCIv%d.%d detected in firmware.\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+
+ if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
+ pr_err("Conflicting PSCI version detected.\n");
+ return -EINVAL;
}
psci_0_2_set_functions();
+ psci_init_migrate();
+
return 0;
}
+typedef int (*psci_initcall_t)(const struct device_node *);
+
/*
* PSCI init function for PSCI versions >=0.2
*
@@ -421,6 +439,7 @@ int __init psci_dt_init(void)
return init_fn(np);
}
+#ifdef CONFIG_ACPI
/*
* We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
* explicitly clarified in SBBR
@@ -441,10 +460,11 @@ int __init psci_acpi_init(void)
return psci_probe();
}
+#endif
#ifdef CONFIG_SMP
-static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
+static int __init cpu_psci_cpu_init(unsigned int cpu)
{
return 0;
}
@@ -469,11 +489,21 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
+static bool psci_tos_resident_on(int cpu)
+{
+ return cpu == resident_cpu;
+}
+
static int cpu_psci_cpu_disable(unsigned int cpu)
{
/* Fail early if we don't have CPU_OFF support */
if (!psci_ops.cpu_off)
return -EOPNOTSUPP;
+
+ /* Trusted OS will deny CPU_OFF */
+ if (psci_tos_resident_on(cpu))
+ return -EPERM;
+
return 0;
}
@@ -484,9 +514,8 @@ static void cpu_psci_cpu_die(unsigned int cpu)
* There are no known implementations of PSCI actually using the
* power state field, pass a sensible default for now.
*/
- struct psci_power_state state = {
- .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
- };
+ u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
+ PSCI_0_2_POWER_STATE_TYPE_SHIFT;
ret = psci_ops.cpu_off(state);
@@ -498,7 +527,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
int err, i;
if (!psci_ops.affinity_info)
- return 1;
+ return 0;
/*
* cpu_kill could race with cpu_die and we can
* potentially end up declaring this cpu undead
@@ -509,7 +538,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
pr_info("CPU%d killed.\n", cpu);
- return 1;
+ return 0;
}
msleep(10);
@@ -518,15 +547,14 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
cpu, err);
- /* Make op_cpu_kill() fail. */
- return 0;
+ return -ETIMEDOUT;
}
#endif
#endif
static int psci_suspend_finisher(unsigned long index)
{
- struct psci_power_state *state = __this_cpu_read(psci_power_state);
+ u32 *state = __this_cpu_read(psci_power_state);
return psci_ops.cpu_suspend(state[index - 1],
virt_to_phys(cpu_resume));
@@ -535,7 +563,7 @@ static int psci_suspend_finisher(unsigned long index)
static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
{
int ret;
- struct psci_power_state *state = __this_cpu_read(psci_power_state);
+ u32 *state = __this_cpu_read(psci_power_state);
/*
* idle state index 0 corresponds to wfi, should never be called
* from the cpu_suspend operations
@@ -543,10 +571,10 @@ static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
if (WARN_ON_ONCE(!index))
return -EINVAL;
- if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY)
+ if (!psci_power_state_loses_context(state[index - 1]))
ret = psci_ops.cpu_suspend(state[index - 1], 0);
else
- ret = __cpu_suspend(index, psci_suspend_finisher);
+ ret = cpu_suspend(index, psci_suspend_finisher);
return ret;
}
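
For reference while reading the conversion above: psci_power_state_is_valid() and
psci_power_state_loses_context() are called here but defined elsewhere in the series.
A minimal sketch of such helpers, built only from the PSCI 0.2 UAPI masks already
visible in the removed pack/unpack code (an illustration, not necessarily the exact
in-tree implementation):

#include <uapi/linux/psci.h>

/* Only bits belonging to the ID, type and affinity-level fields may be set. */
static inline bool psci_power_state_is_valid(u32 state)
{
	const u32 valid_mask = PSCI_0_2_POWER_STATE_ID_MASK |
			       PSCI_0_2_POWER_STATE_TYPE_MASK |
			       PSCI_0_2_POWER_STATE_AFFL_MASK;

	return !(state & ~valid_mask);
}

/* A POWER_DOWN type state loses context; a STANDBY type state does not. */
static inline bool psci_power_state_loses_context(u32 state)
{
	return state & PSCI_0_2_POWER_STATE_TYPE_MASK;
}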
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 7475313..ffd3970 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -105,18 +105,6 @@ static struct resource mem_res[] = {
#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
-void __init early_print(const char *str, ...)
-{
- char buf[256];
- va_list ap;
-
- va_start(ap, str);
- vsnprintf(buf, sizeof(buf), str, ap);
- va_end(ap);
-
- printk("%s", buf);
-}
-
/*
* The recorded values of x0 .. x3 upon kernel entry.
*/
@@ -326,12 +314,14 @@ static void __init setup_processor(void)
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
- if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
- early_print("\n"
- "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
- "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
- "\nPlease check your bootloader.\n",
- dt_phys, phys_to_virt(dt_phys));
+ void *dt_virt = fixmap_remap_fdt(dt_phys);
+
+ if (!dt_virt || !early_init_dt_scan(dt_virt)) {
+ pr_crit("\n"
+ "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
+ "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
+ "\nPlease check your bootloader.",
+ &dt_phys, dt_virt);
while (true)
cpu_relax();
@@ -374,8 +364,6 @@ void __init setup_arch(char **cmdline_p)
{
setup_processor();
- setup_machine_fdt(__fdt_pointer);
-
init_mm.start_code = (unsigned long) _text;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
@@ -386,6 +374,8 @@ void __init setup_arch(char **cmdline_p)
early_fixmap_init();
early_ioremap_init();
+ setup_machine_fdt(__fdt_pointer);
+
parse_early_param();
/*
@@ -408,16 +398,13 @@ void __init setup_arch(char **cmdline_p)
if (acpi_disabled) {
unflatten_device_tree();
psci_dt_init();
- cpu_read_bootcpu_ops();
-#ifdef CONFIG_SMP
- of_smp_init_cpus();
-#endif
} else {
psci_acpi_init();
- acpi_init_cpus();
}
+ cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
+ smp_init_cpus();
smp_build_mpidr_hash();
#endif
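
Taken together, the setup.c hunks above leave the early boot path in roughly this
order; a simplified sketch of the resulting setup_arch(), with unrelated details
elided, not the complete function:

void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	/* init_mm code/data bounds and other early state elided */

	early_fixmap_init();			/* provides the FIX_FDT slot */
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);	/* now maps the DT through the fixmap */

	parse_early_param();

	/* ... ACPI or DT discovery (psci_acpi_init()/psci_dt_init()), then: */
	cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif
}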
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index d26fcd4..1670f15 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -370,7 +370,7 @@ badframe:
if (show_unhandled_signals)
pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
current->comm, task_pid_nr(current), __func__,
- regs->pc, regs->sp);
+ regs->pc, regs->compat_sp);
force_sig(SIGSEGV, current);
return 0;
}
@@ -407,7 +407,7 @@ badframe:
if (show_unhandled_signals)
pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
current->comm, task_pid_nr(current), __func__,
- regs->pc, regs->sp);
+ regs->pc, regs->compat_sp);
force_sig(SIGSEGV, current);
return 0;
}
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index ede186c..803cfea 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -130,12 +130,14 @@ ENDPROC(__cpu_suspend_enter)
/*
* x0 must contain the sctlr value retrieved from restored context
*/
+ .pushsection ".idmap.text", "ax"
ENTRY(cpu_resume_mmu)
ldr x3, =cpu_resume_after_mmu
msr sctlr_el1, x0 // restore sctlr_el1
isb
br x3 // global jump to virtual address
ENDPROC(cpu_resume_mmu)
+ .popsection
cpu_resume_after_mmu:
mov x0, #0 // return zero on success
ldp x19, x20, [sp, #16]
@@ -162,15 +164,12 @@ ENTRY(cpu_resume)
#else
mov x7, xzr
#endif
- adrp x0, sleep_save_sp
- add x0, x0, #:lo12:sleep_save_sp
- ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
+ ldr_l x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
ldr x0, [x0, x7, lsl #3]
/* load sp from context */
ldr x2, [x0, #CPU_CTX_SP]
- adrp x1, sleep_idmap_phys
/* load physical address of identity map page table in x1 */
- ldr x1, [x1, #:lo12:sleep_idmap_phys]
+ adrp x1, idmap_pg_dir
mov sp, x2
/*
* cpu_do_resume expects x0 to contain context physical address
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 2cb0081..4b2121b 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
@@ -248,20 +249,20 @@ static int op_cpu_kill(unsigned int cpu)
* time and hope that it's dead, so let's skip the wait and just hope.
*/
if (!cpu_ops[cpu]->cpu_kill)
- return 1;
+ return 0;
return cpu_ops[cpu]->cpu_kill(cpu);
}
-static DECLARE_COMPLETION(cpu_died);
-
/*
* called on the thread which is asking for a CPU to be shutdown -
* waits until shutdown has completed, or it is timed out.
*/
void __cpu_die(unsigned int cpu)
{
- if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+ int err;
+
+ if (!cpu_wait_death(cpu, 5)) {
pr_crit("CPU%u: cpu didn't die\n", cpu);
return;
}
@@ -273,8 +274,10 @@ void __cpu_die(unsigned int cpu)
* verify that it has really left the kernel before we consider
* clobbering anything it might still be using.
*/
- if (!op_cpu_kill(cpu))
- pr_warn("CPU%d may not have shut down cleanly\n", cpu);
+ err = op_cpu_kill(cpu);
+ if (err)
+ pr_warn("CPU%d may not have shut down cleanly: %d\n",
+ cpu, err);
}
/*
@@ -294,7 +297,7 @@ void cpu_die(void)
local_irq_disable();
/* Tell __cpu_die() that this CPU is now safe to dispose of */
- complete(&cpu_died);
+ (void)cpu_report_death();
/*
* Actually shutdown the CPU. This must never fail. The specific hotplug
@@ -318,57 +321,158 @@ void __init smp_prepare_boot_cpu(void)
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
+static u64 __init of_get_cpu_mpidr(struct device_node *dn)
+{
+ const __be32 *cell;
+ u64 hwid;
+
+ /*
+ * A cpu node with missing "reg" property is
+ * considered invalid to build a cpu_logical_map
+ * entry.
+ */
+ cell = of_get_property(dn, "reg", NULL);
+ if (!cell) {
+ pr_err("%s: missing reg property\n", dn->full_name);
+ return INVALID_HWID;
+ }
+
+ hwid = of_read_number(cell, of_n_addr_cells(dn));
+ /*
+ * Non affinity bits must be set to 0 in the DT
+ */
+ if (hwid & ~MPIDR_HWID_BITMASK) {
+ pr_err("%s: invalid reg property\n", dn->full_name);
+ return INVALID_HWID;
+ }
+ return hwid;
+}
+
+/*
+ * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
+ * entries and check for duplicates. If any is found just ignore the
+ * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
+ * matching valid MPIDR values.
+ */
+static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
+{
+ unsigned int i;
+
+ for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
+ if (cpu_logical_map(i) == hwid)
+ return true;
+ return false;
+}
+
+/*
+ * Initialize cpu operations for a logical cpu and
+ * set it in the possible mask on success
+ */
+static int __init smp_cpu_setup(int cpu)
+{
+ if (cpu_read_ops(cpu))
+ return -ENODEV;
+
+ if (cpu_ops[cpu]->cpu_init(cpu))
+ return -ENODEV;
+
+ set_cpu_possible(cpu, true);
+
+ return 0;
+}
+
+static bool bootcpu_valid __initdata;
+static unsigned int cpu_count = 1;
+
+#ifdef CONFIG_ACPI
+/*
+ * acpi_map_gic_cpu_interface - parse processor MADT entry
+ *
+ * Carry out sanity checks on MADT processor entry and initialize
+ * cpu_logical_map on success
+ */
+static void __init
+acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
+{
+ u64 hwid = processor->arm_mpidr;
+
+ if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
+ pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
+ return;
+ }
+
+ if (!(processor->flags & ACPI_MADT_ENABLED)) {
+ pr_err("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
+ return;
+ }
+
+ if (is_mpidr_duplicate(cpu_count, hwid)) {
+ pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
+ return;
+ }
+
+ /* Check if GICC structure of boot CPU is available in the MADT */
+ if (cpu_logical_map(0) == hwid) {
+ if (bootcpu_valid) {
+ pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
+ hwid);
+ return;
+ }
+ bootcpu_valid = true;
+ return;
+ }
+
+ if (cpu_count >= NR_CPUS)
+ return;
+
+ /* map the logical cpu id to cpu MPIDR */
+ cpu_logical_map(cpu_count) = hwid;
+
+ cpu_count++;
+}
+
+static int __init
+acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *processor;
+
+ processor = (struct acpi_madt_generic_interrupt *)header;
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(header);
+
+ acpi_map_gic_cpu_interface(processor);
+
+ return 0;
+}
+#else
+#define acpi_table_parse_madt(...) do { } while (0)
+#endif
+
/*
* Enumerate the possible CPU set from the device tree and build the
* cpu logical map array containing MPIDR values related to logical
* cpus. Assumes that cpu_logical_map(0) has already been initialized.
*/
-void __init of_smp_init_cpus(void)
+void __init of_parse_and_init_cpus(void)
{
struct device_node *dn = NULL;
- unsigned int i, cpu = 1;
- bool bootcpu_valid = false;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
- const u32 *cell;
- u64 hwid;
+ u64 hwid = of_get_cpu_mpidr(dn);
- /*
- * A cpu node with missing "reg" property is
- * considered invalid to build a cpu_logical_map
- * entry.
- */
- cell = of_get_property(dn, "reg", NULL);
- if (!cell) {
- pr_err("%s: missing reg property\n", dn->full_name);
+ if (hwid == INVALID_HWID)
goto next;
- }
- hwid = of_read_number(cell, of_n_addr_cells(dn));
- /*
- * Non affinity bits must be set to 0 in the DT
- */
- if (hwid & ~MPIDR_HWID_BITMASK) {
- pr_err("%s: invalid reg property\n", dn->full_name);
+ if (is_mpidr_duplicate(cpu_count, hwid)) {
+ pr_err("%s: duplicate cpu reg properties in the DT\n",
+ dn->full_name);
goto next;
}
/*
- * Duplicate MPIDRs are a recipe for disaster. Scan
- * all initialized entries and check for
- * duplicates. If any is found just ignore the cpu.
- * cpu_logical_map was initialized to INVALID_HWID to
- * avoid matching valid MPIDR values.
- */
- for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
- if (cpu_logical_map(i) == hwid) {
- pr_err("%s: duplicate cpu reg properties in the DT\n",
- dn->full_name);
- goto next;
- }
- }
-
- /*
* The numbering scheme requires that the boot CPU
* must be assigned logical id 0. Record it so that
* the logical map built from DT is validated and can
@@ -392,38 +496,58 @@ void __init of_smp_init_cpus(void)
continue;
}
- if (cpu >= NR_CPUS)
- goto next;
-
- if (cpu_read_ops(dn, cpu) != 0)
- goto next;
-
- if (cpu_ops[cpu]->cpu_init(dn, cpu))
+ if (cpu_count >= NR_CPUS)
goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
- cpu_logical_map(cpu) = hwid;
+ cpu_logical_map(cpu_count) = hwid;
next:
- cpu++;
+ cpu_count++;
}
+}
+
+/*
+ * Enumerate the possible CPU set from the device tree or ACPI and build the
+ * cpu logical map array containing MPIDR values related to logical
+ * cpus. Assumes that cpu_logical_map(0) has already been initialized.
+ */
+void __init smp_init_cpus(void)
+{
+ int i;
- /* sanity check */
- if (cpu > NR_CPUS)
- pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
- cpu, NR_CPUS);
+ if (acpi_disabled)
+ of_parse_and_init_cpus();
+ else
+ /*
+ * do a walk of MADT to determine how many CPUs
+ * we have including disabled CPUs, and get information
+ * we need for SMP init
+ */
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ acpi_parse_gic_cpu_interface, 0);
+
+ if (cpu_count > NR_CPUS)
+ pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
+ cpu_count, NR_CPUS);
if (!bootcpu_valid) {
- pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n");
+ pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
return;
}
/*
- * All the cpus that made it to the cpu_logical_map have been
- * validated so set them as possible cpus.
+ * We need to set the cpu_logical_map entries before enabling
+ * the cpus so that cpu processor description entries (DT cpu nodes
+ * and ACPI MADT entries) can be retrieved by matching the cpu hwid
+ * with entries in cpu_logical_map while initializing the cpus.
+ * If the cpu set-up fails, invalidate the cpu_logical_map entry.
*/
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_logical_map(i) != INVALID_HWID)
- set_cpu_possible(i, true);
+ for (i = 1; i < NR_CPUS; i++) {
+ if (cpu_logical_map(i) != INVALID_HWID) {
+ if (smp_cpu_setup(i))
+ cpu_logical_map(i) = INVALID_HWID;
+ }
+ }
}
void __init smp_prepare_cpus(unsigned int max_cpus)
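
The enumeration rework above drops the device_node argument from the cpu_ops hooks
(cpu_read_ops(cpu), cpu_ops[cpu]->cpu_init(cpu)). As a reading aid, a hedged sketch
of the affected part of struct cpu_operations after this change; the full definition
lives in arch/arm64/include/asm/cpu_ops.h (in the diffstat) and only the hooks used
in this section are shown:

struct cpu_operations {
	const char	*name;
	int		(*cpu_init)(unsigned int cpu);	/* was (struct device_node *, unsigned int) */
	int		(*cpu_boot)(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
	int		(*cpu_disable)(unsigned int cpu);
	void		(*cpu_die)(unsigned int cpu);
	int		(*cpu_kill)(unsigned int cpu);
#endif
	/* idle and suspend hooks elided */
};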
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 14944e5..aef3605 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -49,8 +49,14 @@ static void write_pen_release(u64 val)
}
-static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
+static int smp_spin_table_cpu_init(unsigned int cpu)
{
+ struct device_node *dn;
+
+ dn = of_get_cpu_node(cpu, NULL);
+ if (!dn)
+ return -ENODEV;
+
/*
* Determine the address from which the CPU is polling.
*/
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index d7daf45..8297d50 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -51,13 +51,13 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
}
/*
- * __cpu_suspend
+ * cpu_suspend
*
* arg: argument to pass to the finisher function
* fn: finisher function pointer
*
*/
-int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
struct mm_struct *mm = current->active_mm;
int ret;
@@ -82,7 +82,7 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* We are resuming from reset with TTBR0_EL1 set to the
* idmap to enable the MMU; restore the active_mm mappings in
* TTBR0_EL1 unless the active_mm == &init_mm, in which case
- * the thread entered __cpu_suspend with TTBR0_EL1 set to
+ * the thread entered cpu_suspend with TTBR0_EL1 set to
* reserved TTBR0 page tables and should be restored as such.
*/
if (mm == &init_mm)
@@ -118,7 +118,6 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
}
struct sleep_save_sp sleep_save_sp;
-phys_addr_t sleep_idmap_phys;
static int __init cpu_suspend_init(void)
{
@@ -132,9 +131,7 @@ static int __init cpu_suspend_init(void)
sleep_save_sp.save_ptr_stash = ctx_ptr;
sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
- sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
__flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
- __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));
return 0;
}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 1ef2940..a12251c 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -335,8 +335,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (call_undef_hook(regs) == 0)
return;
- if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
- printk_ratelimit()) {
+ if (show_unhandled_signals_ratelimited() && unhandled_signal(current, SIGILL)) {
pr_info("%s[%d]: undefined instruction: pc=%p\n",
current->comm, task_pid_nr(current), pc);
dump_instr(KERN_INFO, regs);
@@ -363,7 +362,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
}
#endif
- if (show_unhandled_signals && printk_ratelimit()) {
+ if (show_unhandled_signals_ratelimited()) {
pr_info("%s[%d]: syscall %d\n", current->comm,
task_pid_nr(current), (int)regs->syscallno);
dump_instr("", regs);
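
show_unhandled_signals_ratelimited(), used in the traps.c hunks above and again in
the fault.c hunk further down, is introduced by this series in asm/system_misc.h
(listed in the diffstat but not quoted here). Its definition is roughly along these
lines, sketched for context rather than copied from the tree:

#include <linux/ratelimit.h>

#define show_unhandled_signals_ratelimited()				\
({									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	bool __show = false;						\
									\
	if (show_unhandled_signals && __ratelimit(&_rs))		\
		__show = true;						\
	__show;								\
})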
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index ff3bdde..f6fe17d 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -15,6 +15,10 @@ ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
+# down to collect2, resulting in silent corruption of the vDSO image.
+ccflags-y += -Wl,-shared
+
obj-y += vdso.o
extra-y += vdso.lds vdso-offsets.h
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index a2c2986..9807333 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -38,6 +38,12 @@ jiffies = jiffies_64;
*(.hyp.text) \
VMLINUX_SYMBOL(__hyp_text_end) = .;
+#define IDMAP_TEXT \
+ . = ALIGN(SZ_4K); \
+ VMLINUX_SYMBOL(__idmap_text_start) = .; \
+ *(.idmap.text) \
+ VMLINUX_SYMBOL(__idmap_text_end) = .;
+
/*
* The size of the PE/COFF section that covers the kernel image, which
* runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -95,6 +101,7 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
HYPERVISOR_TEXT
+ IDMAP_TEXT
*(.fixup)
*(.gnu.warning)
. = ALIGN(16);
@@ -167,11 +174,13 @@ SECTIONS
}
/*
- * The HYP init code can't be more than a page long,
+ * The HYP init code and ID map text can't be longer than a page each,
* and should not cross a page boundary.
*/
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
"HYP init code too big or misaligned")
+ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
+ "ID map text too big or misaligned")
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 519805f..17a8fb1 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,8 +17,10 @@
#include <linux/linkage.h>
+#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
+#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/fpsimdmacros.h>
@@ -808,10 +810,7 @@
* Call into the vgic backend for state saving
*/
.macro save_vgic_state
- adr x24, __vgic_sr_vectors
- ldr x24, [x24, VGIC_SAVE_FN]
- kern_hyp_va x24
- blr x24
+ alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
mrs x24, hcr_el2
mov x25, #HCR_INT_OVERRIDE
neg x25, x25
@@ -828,10 +827,7 @@
orr x24, x24, #HCR_INT_OVERRIDE
orr x24, x24, x25
msr hcr_el2, x24
- adr x24, __vgic_sr_vectors
- ldr x24, [x24, #VGIC_RESTORE_FN]
- kern_hyp_va x24
- blr x24
+ alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
.endm
.macro save_timer_state
@@ -1062,12 +1058,6 @@ ENTRY(__kvm_flush_vm_context)
ret
ENDPROC(__kvm_flush_vm_context)
- // struct vgic_sr_vectors __vgi_sr_vectors;
- .align 3
-ENTRY(__vgic_sr_vectors)
- .skip VGIC_SR_VECTOR_SZ
-ENDPROC(__vgic_sr_vectors)
-
__kvm_hyp_panic:
// Guess the context by looking at VTTBR:
// If zero, then we're already a host.
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 773d37a..9d84feb 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -4,3 +4,5 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_ARM64_PTDUMP) += dump.o
+
+CFLAGS_mmu.o := -I$(srctree)/scripts/dtc/libfdt/
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2560e1e..bdeb5d3 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -22,84 +22,11 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include "proc-macros.S"
/*
- * __flush_dcache_all()
- *
- * Flush the whole D-cache.
- *
- * Corrupted registers: x0-x7, x9-x11
- */
-__flush_dcache_all:
- dmb sy // ensure ordering with previous memory accesses
- mrs x0, clidr_el1 // read clidr
- and x3, x0, #0x7000000 // extract loc from clidr
- lsr x3, x3, #23 // left align loc bit field
- cbz x3, finished // if loc is 0, then no need to clean
- mov x10, #0 // start clean at cache level 0
-loop1:
- add x2, x10, x10, lsr #1 // work out 3x current cache level
- lsr x1, x0, x2 // extract cache type bits from clidr
- and x1, x1, #7 // mask of the bits for current cache only
- cmp x1, #2 // see what cache we have at this level
- b.lt skip // skip if no cache, or just i-cache
- save_and_disable_irqs x9 // make CSSELR and CCSIDR access atomic
- msr csselr_el1, x10 // select current cache level in csselr
- isb // isb to sych the new cssr&csidr
- mrs x1, ccsidr_el1 // read the new ccsidr
- restore_irqs x9
- and x2, x1, #7 // extract the length of the cache lines
- add x2, x2, #4 // add 4 (line length offset)
- mov x4, #0x3ff
- and x4, x4, x1, lsr #3 // find maximum number on the way size
- clz w5, w4 // find bit position of way size increment
- mov x7, #0x7fff
- and x7, x7, x1, lsr #13 // extract max number of the index size
-loop2:
- mov x9, x4 // create working copy of max way size
-loop3:
- lsl x6, x9, x5
- orr x11, x10, x6 // factor way and cache number into x11
- lsl x6, x7, x2
- orr x11, x11, x6 // factor index number into x11
- dc cisw, x11 // clean & invalidate by set/way
- subs x9, x9, #1 // decrement the way
- b.ge loop3
- subs x7, x7, #1 // decrement the index
- b.ge loop2
-skip:
- add x10, x10, #2 // increment cache number
- cmp x3, x10
- b.gt loop1
-finished:
- mov x10, #0 // swith back to cache level 0
- msr csselr_el1, x10 // select current cache level in csselr
- dsb sy
- isb
- ret
-ENDPROC(__flush_dcache_all)
-
-/*
- * flush_cache_all()
- *
- * Flush the entire cache system. The data cache flush is now achieved
- * using atomic clean / invalidates working outwards from L1 cache. This
- * is done using Set/Way based cache maintainance instructions. The
- * instruction cache can still be invalidated back to the point of
- * unification in a single instruction.
- */
-ENTRY(flush_cache_all)
- mov x12, lr
- bl __flush_dcache_all
- mov x0, #0
- ic ialluis // I+BTB cache invalidate
- ret x12
-ENDPROC(flush_cache_all)
-
-/*
* flush_icache_range(start,end)
*
* Ensure that the I and D caches are coherent within specified region.
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index baa758d..76c1e6c 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -92,6 +92,14 @@ static void reset_context(void *info)
unsigned int cpu = smp_processor_id();
struct mm_struct *mm = current->active_mm;
+ /*
+ * current->active_mm could be init_mm for the idle thread immediately
+ * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to
+ * the reserved value, so no need to reset any context.
+ */
+ if (mm == &init_mm)
+ return;
+
smp_rmb();
asid = cpu_last_asid + cpu;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0948d32..b1fc69c 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -115,8 +115,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
{
struct siginfo si;
- if (show_unhandled_signals && unhandled_signal(tsk, sig) &&
- printk_ratelimit()) {
+ if (show_unhandled_signals_ratelimited() && unhandled_signal(tsk, sig)) {
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
addr, esr);
@@ -478,12 +477,19 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
struct pt_regs *regs)
{
struct siginfo info;
+ struct task_struct *tsk = current;
+
+ if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
+ pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
+ tsk->comm, task_pid_nr(tsk),
+ esr_get_class_string(esr), (void *)regs->pc,
+ (void *)regs->sp);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void __user *)addr;
- arm64_notify_die("", regs, &info, esr);
+ arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
}
static struct fault_info debug_fault_info[] = {
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index b6f14e8..4dfa397 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -102,7 +102,6 @@ EXPORT_SYMBOL(flush_dcache_page);
/*
* Additional functions defined in assembly.
*/
-EXPORT_SYMBOL(flush_cache_all);
EXPORT_SYMBOL(flush_icache_range);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 597831b..ad87ce8 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -262,7 +262,7 @@ static void __init free_unused_memmap(void)
* memmap entries are valid from the bank end aligned to
* MAX_ORDER_NR_PAGES.
*/
- prev_end = ALIGN(start + __phys_to_pfn(reg->size),
+ prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
MAX_ORDER_NR_PAGES);
}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5b8b664..82d3435 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
@@ -643,3 +644,68 @@ void __set_fixmap(enum fixed_addresses idx,
flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
}
}
+
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+{
+ const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
+ pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
+ int granularity, size, offset;
+ void *dt_virt;
+
+ /*
+ * Check whether the physical FDT address is set and meets the minimum
+ * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
+ * at least 8 bytes so that we can always access the size field of the
+ * FDT header after mapping the first chunk, double check here if that
+ * is indeed the case.
+ */
+ BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+ if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+ return NULL;
+
+ /*
+ * Make sure that the FDT region can be mapped without the need to
+ * allocate additional translation table pages, so that it is safe
+ * to call create_mapping() this early.
+ *
+ * On 64k pages, the FDT will be mapped using PTEs, so we need to
+ * be in the same PMD as the rest of the fixmap.
+ * On 4k pages, we'll use section mappings for the FDT so we only
+ * have to be in the same PUD.
+ */
+ BUILD_BUG_ON(dt_virt_base % SZ_2M);
+
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) {
+ BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PMD_SHIFT !=
+ __fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT);
+
+ granularity = PAGE_SIZE;
+ } else {
+ BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PUD_SHIFT !=
+ __fix_to_virt(FIX_BTMAP_BEGIN) >> PUD_SHIFT);
+
+ granularity = PMD_SIZE;
+ }
+
+ offset = dt_phys % granularity;
+ dt_virt = (void *)dt_virt_base + offset;
+
+ /* map the first chunk so we can read the size from the header */
+ create_mapping(round_down(dt_phys, granularity), dt_virt_base,
+ granularity, prot);
+
+ if (fdt_check_header(dt_virt) != 0)
+ return NULL;
+
+ size = fdt_totalsize(dt_virt);
+ if (size > MAX_FDT_SIZE)
+ return NULL;
+
+ if (offset + size > granularity)
+ create_mapping(round_down(dt_phys, granularity), dt_virt_base,
+ round_up(offset + size, granularity), prot);
+
+ memblock_reserve(dt_phys, size);
+
+ return dt_virt;
+}
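
fixmap_remap_fdt() above relies on two constants, MIN_FDT_ALIGN and MAX_FDT_SIZE,
that come from the new arch/arm64/include/asm/boot.h (present in the diffstat but
not quoted in this section). A hedged reconstruction of what they amount to,
consistent with the "8-byte aligned, at most 2 MB" rule printed by
setup_machine_fdt() earlier in this diff:

/* illustrative values; see asm/boot.h for the authoritative definitions */
#define MIN_FDT_ALIGN		8	/* enough to read the FDT header safely */
#define MAX_FDT_SIZE		SZ_2M	/* the DT fixmap window spans 2 MB */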
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index cdd754e..39139a3 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -46,52 +46,6 @@
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
/*
- * cpu_cache_off()
- *
- * Turn the CPU D-cache off.
- */
-ENTRY(cpu_cache_off)
- mrs x0, sctlr_el1
- bic x0, x0, #1 << 2 // clear SCTLR.C
- msr sctlr_el1, x0
- isb
- ret
-ENDPROC(cpu_cache_off)
-
-/*
- * cpu_reset(loc)
- *
- * Perform a soft reset of the system. Put the CPU into the same state
- * as it would be if it had been reset, and branch to what would be the
- * reset vector. It must be executed with the flat identity mapping.
- *
- * - loc - location to jump to for soft reset
- */
- .align 5
-ENTRY(cpu_reset)
- mrs x1, sctlr_el1
- bic x1, x1, #1
- msr sctlr_el1, x1 // disable the MMU
- isb
- ret x0
-ENDPROC(cpu_reset)
-
-ENTRY(cpu_soft_restart)
- /* Save address of cpu_reset() and reset address */
- mov x19, x0
- mov x20, x1
-
- /* Turn D-cache off */
- bl cpu_cache_off
-
- /* Push out all dirty data, and ensure cache is empty */
- bl flush_cache_all
-
- mov x0, x20
- ret x19
-ENDPROC(cpu_soft_restart)
-
-/*
* cpu_do_idle()
*
* Idle the processor (wait for interrupt).
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 50a5087..8b888b1 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -572,6 +572,7 @@ static void __init early_reserve_mem_dt(void)
int len;
const __be32 *prop;
+ early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
dt_root = of_get_flat_dt_root();
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index cde35c5d01..f2dd23a 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -580,11 +580,6 @@ void __init early_init_fdt_scan_reserved_mem(void)
if (!initial_boot_params)
return;
- /* Reserve the dtb region */
- early_init_dt_reserve_memory_arch(__pa(initial_boot_params),
- fdt_totalsize(initial_boot_params),
- 0);
-
/* Process header /memreserve/ fields */
for (n = 0; ; n++) {
fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
@@ -598,6 +593,20 @@ void __init early_init_fdt_scan_reserved_mem(void)
}
/**
+ * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
+ */
+void __init early_init_fdt_reserve_self(void)
+{
+ if (!initial_boot_params)
+ return;
+
+ /* Reserve the dtb region */
+ early_init_dt_reserve_memory_arch(__pa(initial_boot_params),
+ fdt_totalsize(initial_boot_params),
+ 0);
+}
+
+/**
* of_scan_flat_dt - scan flattened tree blob and call callback on each.
* @it: callback function
* @data: context data pointer
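
With the blob self-reservation split out of early_init_fdt_scan_reserved_mem(), an
architecture that still wants the core FDT code to reserve the blob now calls the
new helper explicitly, as the powerpc hunk above does. A minimal sketch of that call
order in an arch's early DT setup (illustrative; arm64 instead reserves the blob
itself via memblock_reserve() in fixmap_remap_fdt()):

	/* reserve the FDT blob itself, then process /memreserve/ entries */
	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();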
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 587ee50..fd627a5 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -64,6 +64,7 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data);
extern void early_init_fdt_scan_reserved_mem(void);
+extern void early_init_fdt_reserve_self(void);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
bool no_map);
@@ -91,6 +92,7 @@ extern u64 fdt_translate_address(const void *blob, int node_offset);
extern void of_fdt_limit_memory(int limit);
#else /* CONFIG_OF_FLATTREE */
static inline void early_init_fdt_scan_reserved_mem(void) {}
+static inline void early_init_fdt_reserve_self(void) {}
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
static inline void unflatten_device_tree(void) {}
static inline void unflatten_and_copy_device_tree(void) {}
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index f94d887..bc40137 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -2126,9 +2126,6 @@ int kvm_vgic_hyp_init(void)
goto out_free_irq;
}
- /* Callback into for arch code for setup */
- vgic_arch_setup(vgic);
-
on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
return 0;