@@ -206,7 +206,7 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
 					   struct cacheinfo *sib_leaf)
 {
 	/*
-	 * For non-DT systems, assume unique level 1 cache, system-wide
+	 * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
 	 * shared caches for all other levels. This will be used only if
 	 * arch specific code has not populated shared_cpu_map
 	 */
@@ -214,6 +214,11 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
 }
 #endif
 
+int __weak cache_setup_acpi(unsigned int cpu)
+{
+	return -ENOTSUPP;
+}
+
 static int cache_shared_cpu_map_setup(unsigned int cpu)
 {
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -227,8 +232,8 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
 	if (of_have_populated_dt())
 		ret = cache_setup_of_node(cpu);
 	else if (!acpi_disabled)
-		/* No cache property/hierarchy support yet in ACPI */
-		ret = -ENOTSUPP;
+		ret = cache_setup_acpi(cpu);
+
 	if (ret)
 		return ret;
 
@@ -279,7 +284,8 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
 			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
 			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
 		}
-		of_node_put(this_leaf->fw_token);
+		if (of_have_populated_dt())
+			of_node_put(this_leaf->fw_token);
 	}
 }
 
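A note on the mechanism: cache_setup_acpi() is defined __weak, so the stub
returning -ENOTSUPP is only used until an ACPI backend supplies a strong
symbol of the same name, which then wins at link time. Below is a minimal
sketch of what such an override could look like; acpi_lookup_cache_fw_node()
is a hypothetical helper standing in for whatever code parses the firmware
cache-topology tables, and this is not the eventual in-tree implementation.

#include <linux/cacheinfo.h>
#include <linux/errno.h>

/* Hypothetical helper: returns a token unique per physical cache. */
void *acpi_lookup_cache_fw_node(unsigned int cpu, unsigned int index);

/*
 * Strong definition overriding the __weak stub in
 * drivers/base/cacheinfo.c.
 */
int cache_setup_acpi(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf;
	unsigned int index;

	for (index = 0; index < this_cpu_ci->num_leaves; index++) {
		this_leaf = this_cpu_ci->info_list + index;
		/*
		 * fw_token must be identical for every CPU sharing this
		 * cache, so cache_leaves_are_shared() can compare tokens
		 * the way it previously compared DT nodes.
		 */
		this_leaf->fw_token = acpi_lookup_cache_fw_node(cpu, index);
		if (!this_leaf->fw_token)
			return -ENOENT;
	}
	return 0;
}

The __weak default keeps drivers/base/cacheinfo.c buildable on
architectures with no ACPI cache-topology support, and the new
of_have_populated_dt() guard around of_node_put() avoids dropping a
reference on a fw_token that never was a DT node.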