author		Linus Torvalds <torvalds@linux-foundation.org>	2018-06-08 11:10:58 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-06-08 11:10:58 -0700
commit		410feb75de245664d66bc05ab2e2412751d10acf (patch)
tree		73deae83ab33a7c0668eb00eb2e1d347b20782c9 /drivers/base
parent		2996148a9d4169f19a57827003c75605ce3b152b (diff)
parent		0fe42512b2f03f9e5a20b9f55ef1013a68b4cd48 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
 "Apart from the core arm64 and perf changes, the Spectre v4 mitigation
  touches the arm KVM code and the ACPI PPTT support touches drivers/
  (acpi and cacheinfo). I should have the maintainers' acks in place.

  Summary:

   - Spectre v4 mitigation (Speculative Store Bypass Disable) support
     for arm64 using SMC firmware call to set a hardware chicken bit

   - ACPI PPTT (Processor Properties Topology Table) parsing support
     and enable the feature for arm64

   - Report signal frame size to user via auxv (AT_MINSIGSTKSZ). The
     primary motivation is Scalable Vector Extensions which requires
     more space on the signal frame than the currently defined
     MINSIGSTKSZ

   - ARM perf patches: allow building arm-cci as module, demote
     dev_warn() to dev_dbg() in arm-ccn event_init(), miscellaneous
     cleanups

   - cmpwait() WFE optimisation to avoid some spurious wakeups

   - L1_CACHE_BYTES reverted back to 64 (for performance reasons that
     have to do with some network allocations) while keeping
     ARCH_DMA_MINALIGN to 128. cache_line_size() returns the actual
     hardware Cache Writeback Granule

   - Turn LSE atomics on by default in Kconfig

   - Kernel fault reporting tidying

   - Some #include and miscellaneous cleanups"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (53 commits)
  arm64: Fix syscall restarting around signal suppressed by tracer
  arm64: topology: Avoid checking numa mask for scheduler MC selection
  ACPI / PPTT: fix build when CONFIG_ACPI_PPTT is not enabled
  arm64: cpu_errata: include required headers
  arm64: KVM: Move VCPU_WORKAROUND_2_FLAG macros to the top of the file
  arm64: signal: Report signal frame size to userspace via auxv
  arm64/sve: Thin out initialisation sanity-checks for sve_max_vl
  arm64: KVM: Add ARCH_WORKAROUND_2 discovery through ARCH_FEATURES_FUNC_ID
  arm64: KVM: Handle guest's ARCH_WORKAROUND_2 requests
  arm64: KVM: Add ARCH_WORKAROUND_2 support for guests
  arm64: KVM: Add HYP per-cpu accessors
  arm64: ssbd: Add prctl interface for per-thread mitigation
  arm64: ssbd: Introduce thread flag to control userspace mitigation
  arm64: ssbd: Restore mitigation status on CPU resume
  arm64: ssbd: Skip apply_ssbd if not using dynamic mitigation
  arm64: ssbd: Add global mitigation state accessor
  arm64: Add 'ssbd' command-line option
  arm64: Add ARCH_WORKAROUND_2 probing
  arm64: Add per-cpu infrastructure to call ARCH_WORKAROUND_2
  arm64: Call ARCH_WORKAROUND_2 on transitions between EL0 and EL1
  ...
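Two items above are user-visible interfaces: the signal frame size exported via auxv and the per-thread SSBD control exposed through the generic speculation-control prctl(). The following is only a rough userspace sketch, not part of this merge; the fallback #define for AT_MINSIGSTKSZ and the assumption that the libc/kernel headers already provide the PR_SPEC_* constants are mine.

/* sketch: read AT_MINSIGSTKSZ and request the Speculative Store Bypass
 * mitigation for the current thread (assumes recent kernel headers) */
#include <stdio.h>
#include <sys/auxv.h>
#include <sys/prctl.h>

#ifndef AT_MINSIGSTKSZ
#define AT_MINSIGSTKSZ 51	/* assumed value if the headers lack it */
#endif

int main(void)
{
	/* 0 means the kernel did not supply this auxv entry (older kernels) */
	unsigned long minsigstksz = getauxval(AT_MINSIGSTKSZ);

	printf("AT_MINSIGSTKSZ = %lu\n", minsigstksz);

	/* PR_SPEC_DISABLE disables speculative store bypass, i.e. turns the
	 * mitigation on for this thread only */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	return 0;
}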
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/cacheinfo.c	157
1 file changed, 79 insertions(+), 78 deletions(-)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index edf7262..2880e2a 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -32,50 +32,10 @@ struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
}
#ifdef CONFIG_OF
-static int cache_setup_of_node(unsigned int cpu)
-{
- struct device_node *np;
- struct cacheinfo *this_leaf;
- struct device *cpu_dev = get_cpu_device(cpu);
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
- unsigned int index = 0;
-
- /* skip if of_node is already populated */
- if (this_cpu_ci->info_list->of_node)
- return 0;
-
- if (!cpu_dev) {
- pr_err("No cpu device for CPU %d\n", cpu);
- return -ENODEV;
- }
- np = cpu_dev->of_node;
- if (!np) {
- pr_err("Failed to find cpu%d device node\n", cpu);
- return -ENOENT;
- }
-
- while (index < cache_leaves(cpu)) {
- this_leaf = this_cpu_ci->info_list + index;
- if (this_leaf->level != 1)
- np = of_find_next_cache_node(np);
- else
- np = of_node_get(np);/* cpu node itself */
- if (!np)
- break;
- this_leaf->of_node = np;
- index++;
- }
-
- if (index != cache_leaves(cpu)) /* not all OF nodes populated */
- return -ENOENT;
-
- return 0;
-}
-
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
{
- return sib_leaf->of_node == this_leaf->of_node;
+ return sib_leaf->fw_token == this_leaf->fw_token;
}
/* OF properties to query for a given cache type */
@@ -111,7 +71,7 @@ static inline int get_cacheinfo_idx(enum cache_type type)
return type;
}
-static void cache_size(struct cacheinfo *this_leaf)
+static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
const __be32 *cache_size;
@@ -120,13 +80,14 @@ static void cache_size(struct cacheinfo *this_leaf)
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].size_prop;
- cache_size = of_get_property(this_leaf->of_node, propname, NULL);
+ cache_size = of_get_property(np, propname, NULL);
if (cache_size)
this_leaf->size = of_read_number(cache_size, 1);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
-static void cache_get_line_size(struct cacheinfo *this_leaf)
+static void cache_get_line_size(struct cacheinfo *this_leaf,
+ struct device_node *np)
{
const __be32 *line_size;
int i, lim, ct_idx;
@@ -138,7 +99,7 @@ static void cache_get_line_size(struct cacheinfo *this_leaf)
const char *propname;
propname = cache_type_info[ct_idx].line_size_props[i];
- line_size = of_get_property(this_leaf->of_node, propname, NULL);
+ line_size = of_get_property(np, propname, NULL);
if (line_size)
break;
}
@@ -147,7 +108,7 @@ static void cache_get_line_size(struct cacheinfo *this_leaf)
this_leaf->coherency_line_size = of_read_number(line_size, 1);
}
-static void cache_nr_sets(struct cacheinfo *this_leaf)
+static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
const __be32 *nr_sets;
@@ -156,7 +117,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf)
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].nr_sets_prop;
- nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
+ nr_sets = of_get_property(np, propname, NULL);
if (nr_sets)
this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}
@@ -175,41 +136,77 @@ static void cache_associativity(struct cacheinfo *this_leaf)
this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
-static bool cache_node_is_unified(struct cacheinfo *this_leaf)
+static bool cache_node_is_unified(struct cacheinfo *this_leaf,
+ struct device_node *np)
{
- return of_property_read_bool(this_leaf->of_node, "cache-unified");
+ return of_property_read_bool(np, "cache-unified");
}
-static void cache_of_override_properties(unsigned int cpu)
+static void cache_of_set_props(struct cacheinfo *this_leaf,
+ struct device_node *np)
{
- int index;
+ /*
+ * init_cache_level must setup the cache level correctly
+ * overriding the architecturally specified levels, so
+ * if type is NONE at this stage, it should be unified
+ */
+ if (this_leaf->type == CACHE_TYPE_NOCACHE &&
+ cache_node_is_unified(this_leaf, np))
+ this_leaf->type = CACHE_TYPE_UNIFIED;
+ cache_size(this_leaf, np);
+ cache_get_line_size(this_leaf, np);
+ cache_nr_sets(this_leaf, np);
+ cache_associativity(this_leaf);
+}
+
+static int cache_setup_of_node(unsigned int cpu)
+{
+ struct device_node *np;
struct cacheinfo *this_leaf;
+ struct device *cpu_dev = get_cpu_device(cpu);
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ unsigned int index = 0;
- for (index = 0; index < cache_leaves(cpu); index++) {
+ /* skip if fw_token is already populated */
+ if (this_cpu_ci->info_list->fw_token) {
+ return 0;
+ }
+
+ if (!cpu_dev) {
+ pr_err("No cpu device for CPU %d\n", cpu);
+ return -ENODEV;
+ }
+ np = cpu_dev->of_node;
+ if (!np) {
+ pr_err("Failed to find cpu%d device node\n", cpu);
+ return -ENOENT;
+ }
+
+ while (index < cache_leaves(cpu)) {
this_leaf = this_cpu_ci->info_list + index;
- /*
- * init_cache_level must setup the cache level correctly
- * overriding the architecturally specified levels, so
- * if type is NONE at this stage, it should be unified
- */
- if (this_leaf->type == CACHE_TYPE_NOCACHE &&
- cache_node_is_unified(this_leaf))
- this_leaf->type = CACHE_TYPE_UNIFIED;
- cache_size(this_leaf);
- cache_get_line_size(this_leaf);
- cache_nr_sets(this_leaf);
- cache_associativity(this_leaf);
+ if (this_leaf->level != 1)
+ np = of_find_next_cache_node(np);
+ else
+ np = of_node_get(np);/* cpu node itself */
+ if (!np)
+ break;
+ cache_of_set_props(this_leaf, np);
+ this_leaf->fw_token = np;
+ index++;
}
+
+ if (index != cache_leaves(cpu)) /* not all OF nodes populated */
+ return -ENOENT;
+
+ return 0;
}
#else
-static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
{
/*
- * For non-DT systems, assume unique level 1 cache, system-wide
+ * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
* shared caches for all other levels. This will be used only if
* arch specific code has not populated shared_cpu_map
*/
@@ -217,6 +214,11 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
}
#endif
+int __weak cache_setup_acpi(unsigned int cpu)
+{
+ return -ENOTSUPP;
+}
+
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -230,8 +232,8 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
if (of_have_populated_dt())
ret = cache_setup_of_node(cpu);
else if (!acpi_disabled)
- /* No cache property/hierarchy support yet in ACPI */
- ret = -ENOTSUPP;
+ ret = cache_setup_acpi(cpu);
+
if (ret)
return ret;
@@ -282,16 +284,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
}
- of_node_put(this_leaf->of_node);
+ if (of_have_populated_dt())
+ of_node_put(this_leaf->fw_token);
}
}
-static void cache_override_properties(unsigned int cpu)
-{
- if (of_have_populated_dt())
- return cache_of_override_properties(cpu);
-}
-
static void free_cache_attributes(unsigned int cpu)
{
if (!per_cpu_cacheinfo(cpu))
@@ -325,12 +322,17 @@ static int detect_cache_attributes(unsigned int cpu)
if (per_cpu_cacheinfo(cpu) == NULL)
return -ENOMEM;
+ /*
+ * populate_cache_leaves() may completely setup the cache leaves and
+ * shared_cpu_map or it may leave it partially setup.
+ */
ret = populate_cache_leaves(cpu);
if (ret)
goto free_ci;
/*
- * For systems using DT for cache hierarchy, of_node and shared_cpu_map
- * will be set up here only if they are not populated already
+ * For systems using DT for cache hierarchy, fw_token
+ * and shared_cpu_map will be set up here only if they are
+ * not populated already
*/
ret = cache_shared_cpu_map_setup(cpu);
if (ret) {
@@ -338,7 +340,6 @@ static int detect_cache_attributes(unsigned int cpu)
goto free_ci;
}
- cache_override_properties(cpu);
return 0;
free_ci:
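
The diff leaves cache_setup_acpi() as a __weak stub returning -ENOTSUPP, so the generic code links even when no firmware-table support is built in; an ACPI PPTT implementation elsewhere provides the strong symbol. Below is a minimal sketch of what such an override could look like; lookup_fw_cache_token() is a hypothetical helper standing in for the real firmware-table walk, and this is not the implementation from this series.

/* sketch only: strong definition overriding the __weak default, filling
 * in fw_token so cache_leaves_are_shared() can compare leaves across CPUs */
int cache_setup_acpi(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf;
	unsigned int index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		/* placeholder: return a stable token per physical cache,
		 * e.g. a pointer into the parsed firmware table */
		this_leaf->fw_token = lookup_fw_cache_token(cpu, this_leaf);
		if (!this_leaf->fw_token)
			return -ENOENT;
	}

	return 0;
}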