drivers: base cacheinfo: Add support for ACPI based firmware tables

Call ACPI cache parsing routines from base cacheinfo code if ACPI
is enabled. Also stub out cache_setup_acpi and acpi_find_last_cache_level
so that individual architectures can enable ACPI topology parsing.

Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tested-by: Vijaya Kumar K <vkilari@codeaurora.org>
Tested-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Tested-by: Tomasz Nowicki <Tomasz.Nowicki@cavium.com>
Acked-by: Sudeep Holla <sudeep.holla@arm.com>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
commit 582b468bdc (parent 0ce8223223)
Author:    Jeremy Linton, 2018-05-11 18:58:02 -05:00
Committer: Catalin Marinas
2 changed files with 27 additions and 4 deletions
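
As the description notes, the generic code now calls cache_setup_acpi() whenever ACPI is enabled, while the __weak stub added in the first file simply returns -ENOTSUPP until an ACPI table parser supplies a strong definition. A minimal sketch of what such an override could look like is shown here; it is illustrative only, and pptt_fill_shared_cpu_map() is a made-up placeholder for the real table walk, not part of this commit:

#include <linux/acpi.h>
#include <linux/cacheinfo.h>

/* hypothetical helper standing in for an actual PPTT walk */
static void pptt_fill_shared_cpu_map(struct acpi_table_header *table,
				     unsigned int cpu);

/*
 * Illustrative sketch: a strong definition like this would override the
 * __weak cache_setup_acpi() stub added below.
 */
int cache_setup_acpi(unsigned int cpu)
{
	struct acpi_table_header *table;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
	if (ACPI_FAILURE(status))
		return -ENOENT;		/* no PPTT: nothing to parse */

	/* fill each cache leaf's shared_cpu_map for this CPU (hypothetical) */
	pptt_fill_shared_cpu_map(table, cpu);

	acpi_put_table(table);
	return 0;
}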

--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c

@@ -206,7 +206,7 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
 					   struct cacheinfo *sib_leaf)
 {
 	/*
-	 * For non-DT systems, assume unique level 1 cache, system-wide
+	 * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
 	 * shared caches for all other levels. This will be used only if
 	 * arch specific code has not populated shared_cpu_map
 	 */
@@ -214,6 +214,11 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
 }
 #endif
 
+int __weak cache_setup_acpi(unsigned int cpu)
+{
+	return -ENOTSUPP;
+}
+
 static int cache_shared_cpu_map_setup(unsigned int cpu)
 {
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -227,8 +232,8 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
 	if (of_have_populated_dt())
 		ret = cache_setup_of_node(cpu);
 	else if (!acpi_disabled)
-		/* No cache property/hierarchy support yet in ACPI */
-		ret = -ENOTSUPP;
+		ret = cache_setup_acpi(cpu);
+
 	if (ret)
 		return ret;
@@ -279,7 +284,8 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
 			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
 			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
 		}
-		of_node_put(this_leaf->fw_token);
+		if (of_have_populated_dt())
+			of_node_put(this_leaf->fw_token);
 	}
 }

--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h

@@ -97,6 +97,23 @@ int func(unsigned int cpu)					\
 struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
 int init_cache_level(unsigned int cpu);
 int populate_cache_leaves(unsigned int cpu);
+int cache_setup_acpi(unsigned int cpu);
+#ifndef CONFIG_ACPI
+/*
+ * acpi_find_last_cache_level is only called on ACPI enabled
+ * platforms using the PPTT for topology. This means that if
+ * the platform supports other firmware configuration methods
+ * we need to stub out the call when ACPI is disabled.
+ * ACPI enabled platforms not using PPTT won't be making calls
+ * to this function so we need not worry about them.
+ */
+static inline int acpi_find_last_cache_level(unsigned int cpu)
+{
+	return 0;
+}
+#else
+int acpi_find_last_cache_level(unsigned int cpu);
+#endif
 
 const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
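
The #ifndef CONFIG_ACPI stub above lets architecture code call acpi_find_last_cache_level() unconditionally and still build with ACPI compiled out. A hedged sketch of that usage pattern follows; it is not the arch code from this series, and detect_cache_levels_from_registers() is a made-up placeholder for whatever hardware probing the architecture already does:

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/of.h>

/* hypothetical: stands in for the architecture's own cache probing */
static unsigned int detect_cache_levels_from_registers(unsigned int cpu);

static int example_init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int level, fw_level;

	level = detect_cache_levels_from_registers(cpu);

	/* firmware may describe outer caches the CPU registers do not */
	if (acpi_disabled)
		fw_level = of_find_last_cache_level(cpu);
	else
		fw_level = acpi_find_last_cache_level(cpu);

	if (level < fw_level)
		level = fw_level;

	this_cpu_ci->num_levels = level;
	return 0;
}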