author	Arnd Bergmann <arnd@arndb.de>	2015-05-22 16:32:02 +0200
committer	Arnd Bergmann <arnd@arndb.de>	2015-05-22 16:32:02 +0200
commit	a4526915b67afb09c60bece6bc395e58ba83e271 (patch)
tree	374b787a3be3c23b96e391fdfd06183f8d6ba270 /arch/arm
parent	8064887e02fd684fa0576cbbcdbc29ceab125c4c (diff)
parent	ed5cd8163da8d3e02ef83b84e42d555d40bab96a (diff)
Merge tag 'arm-soc/for-4.2/soc-take2' of http://github.com/broadcom/stblinux into next/soc
Merge mach-bcm changes from Florian Fainelli:

This pull request contains the following changes:

- Rafal adds an additional fault code to be ignored by the kernel on
  BCM5301X SoC

- BCM63138 SMP support which:
  * common code to control the PMB bus, to be shared with a reset
    controller driver in drivers/reset
  * secondary CPU initialization sequence using PMB helpers
  * small changes suggested by Russell King to allow platforms to
    disable VFP

* tag 'arm-soc/for-4.2/soc-take2' of http://github.com/broadcom/stblinux:
  ARM: BCM63xx: Add SMP support for BCM63138
  ARM: vfp: Add vfp_disable for problematic platforms
  ARM: vfp: Add include guards
  ARM: BCM63xx: Add secondary CPU PMB initialization sequence
  ARM: BCM63xx: Add Broadcom BCM63xx PMB controller helpers
  ARM: BCM5301X: Ignore another (BCM4709 specific) fault code
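As a rough sketch of how a platform would opt into the SMP method added by this merge: the enable-method string "brcm,bcm63138", the boot LUT compatible "brcm,bcm63138-bootlut" and the per-CPU "reg"/"resets" properties below are the ones the new bcm63xx_smp.c and bcm63xx_pmb.c code looks up; the node names, unit addresses and cell values are illustrative assumptions, not values taken from this merge.

    /* Illustrative device-tree sketch; addresses and values are assumed, and
     * the referenced &pmb node (a PMB reset controller with
     * #reset-cells = <2>) is not shown.
     */
    cpus {
        #address-cells = <1>;
        #size-cells = <0>;
        enable-method = "brcm,bcm63138";

        cpu@0 {
            device_type = "cpu";
            compatible = "arm,cortex-a9";
            reg = <0>;
        };

        cpu@1 {
            device_type = "cpu";
            compatible = "arm,cortex-a9";
            reg = <1>;
            /* two cells: PMB bus address of the CPU block, number of zones */
            resets = <&pmb 0x4 1>;
        };
    };

    bootlut: bootlut@8000 {
        compatible = "brcm,bcm63138-bootlut";
        reg = <0x8000 0x50>;
    };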
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/include/asm/vfp.h		9
-rw-r--r--	arch/arm/mach-bcm/Makefile		7
-rw-r--r--	arch/arm/mach-bcm/bcm63xx_headsmp.S	23
-rw-r--r--	arch/arm/mach-bcm/bcm63xx_pmb.c		221
-rw-r--r--	arch/arm/mach-bcm/bcm63xx_smp.c		169
-rw-r--r--	arch/arm/mach-bcm/bcm63xx_smp.h		9
-rw-r--r--	arch/arm/mach-bcm/bcm_5301x.c		9
-rw-r--r--	arch/arm/vfp/vfpmodule.c		13
8 files changed, 455 insertions, 5 deletions
diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
index ee5f308..22e4140 100644
--- a/arch/arm/include/asm/vfp.h
+++ b/arch/arm/include/asm/vfp.h
@@ -5,6 +5,9 @@
* First, the standard VFP set.
*/
+#ifndef __ASM_VFP_H
+#define __ASM_VFP_H
+
#define FPSID cr0
#define FPSCR cr1
#define MVFR1 cr6
@@ -87,3 +90,9 @@
#define VFPOPDESC_UNUSED_BIT (24)
#define VFPOPDESC_UNUSED_MASK (0xFF << VFPOPDESC_UNUSED_BIT)
#define VFPOPDESC_OPDESC_MASK (~(VFPOPDESC_LENGTH_MASK | VFPOPDESC_UNUSED_MASK))
+
+#ifndef __ASSEMBLY__
+void vfp_disable(void);
+#endif
+
+#endif /* __ASM_VFP_H */
diff --git a/arch/arm/mach-bcm/Makefile b/arch/arm/mach-bcm/Makefile
index 4c38674..55d3a12 100644
--- a/arch/arm/mach-bcm/Makefile
+++ b/arch/arm/mach-bcm/Makefile
@@ -38,7 +38,12 @@ obj-$(CONFIG_ARCH_BCM2835) += board_bcm2835.o
obj-$(CONFIG_ARCH_BCM_5301X) += bcm_5301x.o
# BCM63XXx
-obj-$(CONFIG_ARCH_BCM_63XX) := bcm63xx.o
+ifeq ($(CONFIG_ARCH_BCM_63XX),y)
+CFLAGS_bcm63xx_headsmp.o += -march=armv7-a
+obj-y += bcm63xx.o
+obj-$(CONFIG_SMP) += bcm63xx_smp.o bcm63xx_headsmp.o \
+ bcm63xx_pmb.o
+endif
ifeq ($(CONFIG_ARCH_BRCMSTB),y)
CFLAGS_platsmp-brcmstb.o += -march=armv7-a
diff --git a/arch/arm/mach-bcm/bcm63xx_headsmp.S b/arch/arm/mach-bcm/bcm63xx_headsmp.S
new file mode 100644
index 0000000..c7af397
--- /dev/null
+++ b/arch/arm/mach-bcm/bcm63xx_headsmp.S
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2015, Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+
+ENTRY(bcm63138_secondary_startup)
+ ARM_BE8(setend be)
+ /*
+ * The L1 cache has unpredictable contents at power-up; invalidate
+ * it without flushing its contents out to memory.
+ */
+ bl v7_invalidate_l1
+ nop
+
+ b secondary_startup
+ENDPROC(bcm63138_secondary_startup)
diff --git a/arch/arm/mach-bcm/bcm63xx_pmb.c b/arch/arm/mach-bcm/bcm63xx_pmb.c
new file mode 100644
index 0000000..c39752b
--- /dev/null
+++ b/arch/arm/mach-bcm/bcm63xx_pmb.c
@@ -0,0 +1,221 @@
+/*
+ * Broadcom BCM63138 PMB initialization for secondary CPU(s)
+ *
+ * Copyright (C) 2015 Broadcom Corporation
+ * Author: Florian Fainelli <f.fainelli@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/reset/bcm63xx_pmb.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "bcm63xx_smp.h"
+
+/* ARM Control register definitions */
+#define CORE_PWR_CTRL_SHIFT 0
+#define CORE_PWR_CTRL_MASK 0x3
+#define PLL_PWR_ON BIT(8)
+#define PLL_LDO_PWR_ON BIT(9)
+#define PLL_CLAMP_ON BIT(10)
+#define CPU_RESET_N(x) BIT(13 + (x))
+#define NEON_RESET_N BIT(15)
+#define PWR_CTRL_STATUS_SHIFT 28
+#define PWR_CTRL_STATUS_MASK 0x3
+#define PWR_DOWN_SHIFT 30
+#define PWR_DOWN_MASK 0x3
+
+/* CPU Power control register definitions */
+#define MEM_PWR_OK BIT(0)
+#define MEM_PWR_ON BIT(1)
+#define MEM_CLAMP_ON BIT(2)
+#define MEM_PWR_OK_STATUS BIT(4)
+#define MEM_PWR_ON_STATUS BIT(5)
+#define MEM_PDA_SHIFT 8
+#define MEM_PDA_MASK 0xf
+#define MEM_PDA_CPU_MASK 0x1
+#define MEM_PDA_NEON_MASK 0xf
+#define CLAMP_ON BIT(15)
+#define PWR_OK_SHIFT 16
+#define PWR_OK_MASK 0xf
+#define PWR_ON_SHIFT 20
+#define PWR_CPU_MASK 0x03
+#define PWR_NEON_MASK 0x01
+#define PWR_ON_MASK 0xf
+#define PWR_OK_STATUS_SHIFT 24
+#define PWR_OK_STATUS_MASK 0xf
+#define PWR_ON_STATUS_SHIFT 28
+#define PWR_ON_STATUS_MASK 0xf
+
+#define ARM_CONTROL 0x30
+#define ARM_PWR_CONTROL_BASE 0x34
+#define ARM_PWR_CONTROL(x) (ARM_PWR_CONTROL_BASE + (x) * 0x4)
+#define ARM_NEON_L2 0x3c
+
+/* Perform a register write, then spin until the value read back,
+ * shifted right by 'shift' and masked with 'mask', equals 'cond'.
+ */
+static int bpcm_wr_rd_mask(void __iomem *master,
+ unsigned int addr, u32 off, u32 *val,
+ u32 shift, u32 mask, u32 cond)
+{
+ int ret;
+
+ ret = bpcm_wr(master, addr, off, *val);
+ if (ret)
+ return ret;
+
+ do {
+ ret = bpcm_rd(master, addr, off, val);
+ if (ret)
+ return ret;
+
+ cpu_relax();
+ } while (((*val >> shift) & mask) != cond);
+
+ return ret;
+}
+
+/* Global lock to serialize accesses to the PMB registers while we
+ * are bringing up the secondary CPU
+ */
+static DEFINE_SPINLOCK(pmb_lock);
+
+static int bcm63xx_pmb_get_resources(struct device_node *dn,
+ void __iomem **base,
+ unsigned int *cpu,
+ unsigned int *addr)
+{
+ struct device_node *pmb_dn;
+ struct of_phandle_args args;
+ int ret;
+
+ ret = of_property_read_u32(dn, "reg", cpu);
+ if (ret) {
+ pr_err("CPU is missing a reg node\n");
+ return ret;
+ }
+
+ ret = of_parse_phandle_with_args(dn, "resets", "#reset-cells",
+ 0, &args);
+ if (ret) {
+ pr_err("CPU is missing a resets phandle\n");
+ return ret;
+ }
+
+ pmb_dn = args.np;
+ if (args.args_count != 2) {
+ pr_err("reset-controller does not conform to reset-cells\n");
+ return -EINVAL;
+ }
+
+ *base = of_iomap(args.np, 0);
+ if (!*base) {
+ pr_err("failed remapping PMB register\n");
+ return -ENOMEM;
+ }
+
+ /* We do not need the number of zones */
+ *addr = args.args[0];
+
+ return 0;
+}
+
+int bcm63xx_pmb_power_on_cpu(struct device_node *dn)
+{
+ void __iomem *base;
+ unsigned int cpu, addr;
+ unsigned long flags;
+ u32 val, ctrl;
+ int ret;
+
+ ret = bcm63xx_pmb_get_resources(dn, &base, &cpu, &addr);
+ if (ret)
+ return ret;
+
+ /* We would not know how to enable a third or later CPU */
+ WARN_ON(cpu > 1);
+
+ spin_lock_irqsave(&pmb_lock, flags);
+
+ /* Check if the CPU is already on and save the ARM_CONTROL register
+ * value since we will use it later to de-assert the CPU reset once
+ * done with the CPU-specific power sequence
+ */
+ ret = bpcm_rd(base, addr, ARM_CONTROL, &ctrl);
+ if (ret)
+ goto out;
+
+ if (ctrl & CPU_RESET_N(cpu)) {
+ pr_info("PMB: CPU%d is already powered on\n", cpu);
+ ret = 0;
+ goto out;
+ }
+
+ /* Power on PLL */
+ ret = bpcm_rd(base, addr, ARM_PWR_CONTROL(cpu), &val);
+ if (ret)
+ goto out;
+
+ val |= (PWR_CPU_MASK << PWR_ON_SHIFT);
+
+ ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
+ PWR_ON_STATUS_SHIFT, PWR_CPU_MASK, PWR_CPU_MASK);
+ if (ret)
+ goto out;
+
+ val |= (PWR_CPU_MASK << PWR_OK_SHIFT);
+
+ ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
+ PWR_OK_STATUS_SHIFT, PWR_CPU_MASK, PWR_CPU_MASK);
+ if (ret)
+ goto out;
+
+ val &= ~CLAMP_ON;
+
+ ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val);
+ if (ret)
+ goto out;
+
+ /* Power on CPU<N> RAM */
+ val &= ~(MEM_PDA_MASK << MEM_PDA_SHIFT);
+
+ ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val);
+ if (ret)
+ goto out;
+
+ val |= MEM_PWR_ON;
+
+ ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
+ 0, MEM_PWR_ON_STATUS, MEM_PWR_ON_STATUS);
+ if (ret)
+ goto out;
+
+ val |= MEM_PWR_OK;
+
+ ret = bpcm_wr_rd_mask(base, addr, ARM_PWR_CONTROL(cpu), &val,
+ 0, MEM_PWR_OK_STATUS, MEM_PWR_OK_STATUS);
+ if (ret)
+ goto out;
+
+ val &= ~MEM_CLAMP_ON;
+
+ ret = bpcm_wr(base, addr, ARM_PWR_CONTROL(cpu), val);
+ if (ret)
+ goto out;
+
+ /* De-assert CPU reset */
+ ctrl |= CPU_RESET_N(cpu);
+
+ ret = bpcm_wr(base, addr, ARM_CONTROL, ctrl);
+out:
+ spin_unlock_irqrestore(&pmb_lock, flags);
+ iounmap(base);
+ return ret;
+}
diff --git a/arch/arm/mach-bcm/bcm63xx_smp.c b/arch/arm/mach-bcm/bcm63xx_smp.c
new file mode 100644
index 0000000..3f014f1
--- /dev/null
+++ b/arch/arm/mach-bcm/bcm63xx_smp.c
@@ -0,0 +1,169 @@
+/*
+ * Broadcom BCM63138 DSL SoCs SMP support code
+ *
+ * Copyright (C) 2015, Broadcom Corporation
+ *
+ * Licensed under the terms of the GPLv2
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp_scu.h>
+#include <asm/smp_plat.h>
+#include <asm/vfp.h>
+
+#include "bcm63xx_smp.h"
+
+/* Size of mapped Cortex A9 SCU address space */
+#define CORTEX_A9_SCU_SIZE 0x58
+
+/*
+ * Enable the Cortex A9 Snoop Control Unit
+ *
+ * By the time this is called we already know there are multiple
+ * cores present. We assume we're running on a Cortex A9 processor,
+ * so any trouble getting the base address register or getting the
+ * SCU base is a problem.
+ *
+ * Return 0 if successful or an error code otherwise.
+ */
+static int __init scu_a9_enable(void)
+{
+ unsigned long config_base;
+ void __iomem *scu_base;
+ unsigned int i, ncores;
+
+ if (!scu_a9_has_base()) {
+ pr_err("no configuration base address register!\n");
+ return -ENXIO;
+ }
+
+ /* Config base address register value is zero for uniprocessor */
+ config_base = scu_a9_get_base();
+ if (!config_base) {
+ pr_err("hardware reports only one core\n");
+ return -ENOENT;
+ }
+
+ scu_base = ioremap((phys_addr_t)config_base, CORTEX_A9_SCU_SIZE);
+ if (!scu_base) {
+ pr_err("failed to remap config base (%lu/%u) for SCU\n",
+ config_base, CORTEX_A9_SCU_SIZE);
+ return -ENOMEM;
+ }
+
+ scu_enable(scu_base);
+
+ ncores = scu_base ? scu_get_core_count(scu_base) : 1;
+
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
+ /* The BCM63138 SoC has two Cortex-A9 CPUs: CPU0 features a complete
+ * and fully functional VFP unit that can be used, but CPU1 does not.
+ * Since we cannot trap kernel-mode NEON to force migration to CPU0,
+ * just do not advertise VFP support at all.
+ *
+ * This makes vfp_init() bail out and never attempt to use VFP.
+ * For kernel-mode NEON we do not want to introduce any conditionals
+ * in hot paths, so we just restrict the system to UP instead.
+ */
+#ifdef CONFIG_VFP
+ if (ncores > 1) {
+ pr_warn("SMP: secondary CPUs lack VFP unit, disabling VFP\n");
+ vfp_disable();
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+ WARN(1, "SMP: kernel-mode NEON enabled, restricting to UP\n");
+ ncores = 1;
+#endif
+ }
+#endif
+
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+
+ iounmap(scu_base); /* That's the last we'll need of this */
+
+ return 0;
+}
+
+static const struct of_device_id bcm63138_bootlut_ids[] = {
+ { .compatible = "brcm,bcm63138-bootlut", },
+ { /* sentinel */ },
+};
+
+#define BOOTLUT_RESET_VECT 0x20
+
+static int bcm63138_smp_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ void __iomem *bootlut_base;
+ struct device_node *dn;
+ int ret = 0;
+ u32 val;
+
+ dn = of_find_matching_node(NULL, bcm63138_bootlut_ids);
+ if (!dn) {
+ pr_err("SMP: unable to find bcm63138 boot LUT node\n");
+ return -ENODEV;
+ }
+
+ bootlut_base = of_iomap(dn, 0);
+ of_node_put(dn);
+
+ if (!bootlut_base) {
+ pr_err("SMP: unable to remap boot LUT base register\n");
+ return -ENOMEM;
+ }
+
+ /* Locate the secondary CPU node */
+ dn = of_get_cpu_node(cpu_logical_map(cpu), NULL);
+ if (!dn) {
+ pr_err("SMP: failed to locate secondary CPU%d node\n", cpu);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Write the secondary init routine to the BootLUT reset vector */
+ val = virt_to_phys(bcm63138_secondary_startup);
+ writel_relaxed(val, bootlut_base + BOOTLUT_RESET_VECT);
+
+ /* Power up the core; it will jump straight to its reset vector when
+ * we return
+ */
+ ret = bcm63xx_pmb_power_on_cpu(dn);
+ if (ret)
+ goto out;
+out:
+ iounmap(bootlut_base);
+
+ return ret;
+}
+
+static void __init bcm63138_smp_prepare_cpus(unsigned int max_cpus)
+{
+ int ret;
+
+ ret = scu_a9_enable();
+ if (ret) {
+ pr_warn("SMP: Cortex-A9 SCU setup failed\n");
+ return;
+ }
+}
+
+struct smp_operations bcm63138_smp_ops __initdata = {
+ .smp_prepare_cpus = bcm63138_smp_prepare_cpus,
+ .smp_boot_secondary = bcm63138_smp_boot_secondary,
+};
+
+CPU_METHOD_OF_DECLARE(bcm63138_smp, "brcm,bcm63138", &bcm63138_smp_ops);
diff --git a/arch/arm/mach-bcm/bcm63xx_smp.h b/arch/arm/mach-bcm/bcm63xx_smp.h
new file mode 100644
index 0000000..50b7604
--- /dev/null
+++ b/arch/arm/mach-bcm/bcm63xx_smp.h
@@ -0,0 +1,9 @@
+#ifndef __BCM63XX_SMP_H
+#define __BCM63XX_SMP_H
+
+struct device_node;
+
+extern void bcm63138_secondary_startup(void);
+extern int bcm63xx_pmb_power_on_cpu(struct device_node *dn);
+
+#endif /* __BCM63XX_SMP_H */
diff --git a/arch/arm/mach-bcm/bcm_5301x.c b/arch/arm/mach-bcm/bcm_5301x.c
index e9bcbdb..7aef927 100644
--- a/arch/arm/mach-bcm/bcm_5301x.c
+++ b/arch/arm/mach-bcm/bcm_5301x.c
@@ -18,15 +18,16 @@ static bool first_fault = true;
static int bcm5301x_abort_handler(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
- if (fsr == 0x1c06 && first_fault) {
+ if ((fsr == 0x1406 || fsr == 0x1c06) && first_fault) {
first_fault = false;
/*
- * These faults with code 0x1c06 happens for no good reason,
- * possibly left over from the CFE boot loader.
+ * These faults with codes 0x1406 (BCM4709) or 0x1c06 happen
+ * for no good reason, possibly left over from the CFE boot
+ * loader.
*/
pr_warn("External imprecise Data abort at addr=%#lx, fsr=%#x ignored.\n",
- addr, fsr);
+ addr, fsr);
/* Returning non-zero causes fault display and panic */
return 0;
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index f6e4d56..2a61e4b 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -445,6 +445,19 @@ static void vfp_enable(void *unused)
set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
+/* Called by platforms on which we want to disable VFP because it may not be
+ * present on all CPUs within an SMP complex. Needs to be called prior to
+ * vfp_init().
+ */
+void vfp_disable(void)
+{
+ if (VFP_arch) {
+ pr_debug("%s: should be called prior to vfp_init\n", __func__);
+ return;
+ }
+ VFP_arch = 1;
+}
+
#ifdef CONFIG_CPU_PM
static int vfp_pm_suspend(void)
{