arm64: wrap and generalise counter read functions

In preparation for other uses of the Activity Monitors Unit (AMU) cycle
counters, place the counter read functionality in generic functions that
can be reused: read_corecnt() and read_constcnt().
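Both wrappers are safe to call regardless of CONFIG_ARM64_AMU_EXTN: with the
option enabled they read the AMU system registers, otherwise they expand to
0UL. A minimal usage sketch (the snapshot helper below is illustrative, not
part of this patch):

/* Illustrative only: snapshot both AMU cycle counters on the current CPU. */
static void amu_snapshot(u64 *core, u64 *constant)
{
        *core = read_corecnt();         /* cycles at the current CPU frequency */
        *constant = read_constcnt();    /* cycles at the fixed reference rate */
}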

As a result, implement update_freq_counters_refs(), which replaces
init_cpu_freq_invariance_counters() and both initialises and updates
the per-cpu reference variables.
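The same function now serves both roles: it is called once when the AMU is
detected on a CPU (from cpu_amu_enable()) to initialise the references, and
again on every scheduler tick to refresh them before the deltas are taken.
A sketch of the resulting pattern, reusing the per-cpu variables and
update_freq_counters_refs() from this patch (the delta helper is illustrative,
not part of the patch):

#include <linux/percpu.h>

static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);

void update_freq_counters_refs(void)
{
        this_cpu_write(arch_core_cycles_prev, read_corecnt());
        this_cpu_write(arch_const_cycles_prev, read_constcnt());
}

/* Illustrative helper: per-tick counter deltas derived from the references. */
static void amu_counter_deltas(u64 *core_delta, u64 *const_delta)
{
        u64 prev_core = this_cpu_read(arch_core_cycles_prev);
        u64 prev_const = this_cpu_read(arch_const_cycles_prev);

        update_freq_counters_refs();

        *core_delta = this_cpu_read(arch_core_cycles_prev) - prev_core;
        *const_delta = this_cpu_read(arch_const_cycles_prev) - prev_const;
}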

Signed-off-by: Ionela Voinescu <ionela.voinescu@arm.com>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201106125334.21570-2-ionela.voinescu@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
commit 4b9cf23c17 (parent f8394f232b)
Author:    Ionela Voinescu, 2020-11-06 12:53:32 +00:00
Committer: Catalin Marinas
4 changed files with 23 additions and 14 deletions

arch/arm64/include/asm/cpufeature.h

@@ -765,6 +765,11 @@ static inline bool cpu_has_hw_af(void)
 #ifdef CONFIG_ARM64_AMU_EXTN
 /* Check whether the cpu supports the Activity Monitors Unit (AMU) */
 extern bool cpu_has_amu_feat(int cpu);
+#else
+static inline bool cpu_has_amu_feat(int cpu)
+{
+        return false;
+}
 #endif
 
 static inline unsigned int get_vmid_bits(u64 mmfr1)
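The !CONFIG_ARM64_AMU_EXTN stub added above lets callers test for AMU support
without wrapping the call in #ifdefs. A minimal sketch of such a caller,
assuming only cpu_has_amu_feat() from this hunk (the helper amu_cpu_count()
is illustrative, not part of the patch):

#include <linux/cpumask.h>
#include <asm/cpufeature.h>

/* Illustrative only: count CPUs that expose usable AMU counters. */
static int amu_cpu_count(void)
{
        int cpu, n = 0;

        for_each_present_cpu(cpu)
                if (cpu_has_amu_feat(cpu)) /* always false when !CONFIG_ARM64_AMU_EXTN */
                        n++;

        return n;
}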

arch/arm64/include/asm/topology.h

@@ -16,12 +16,14 @@ int pcibus_to_node(struct pci_bus *bus);
 
 #include <linux/arch_topology.h>
 
+void update_freq_counters_refs(void);
+void topology_scale_freq_tick(void);
+
 #ifdef CONFIG_ARM64_AMU_EXTN
 /*
  * Replace task scheduler's default counter-based
  * frequency-invariance scale factor setting.
  */
-void topology_scale_freq_tick(void);
 #define arch_scale_freq_tick topology_scale_freq_tick
 #endif /* CONFIG_ARM64_AMU_EXTN */

arch/arm64/kernel/cpufeature.c

@@ -1526,16 +1526,13 @@ bool cpu_has_amu_feat(int cpu)
 	return cpumask_test_cpu(cpu, &amu_cpus);
 }
 
-/* Initialize the use of AMU counters for frequency invariance */
-extern void init_cpu_freq_invariance_counters(void);
-
 static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
 {
 	if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) {
 		pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
 			smp_processor_id());
 		cpumask_set_cpu(smp_processor_id(), &amu_cpus);
-		init_cpu_freq_invariance_counters();
+		update_freq_counters_refs();
 	}
 }

arch/arm64/kernel/topology.c

@@ -124,6 +124,12 @@ int __init parse_acpi_topology(void)
 #endif
 
 #ifdef CONFIG_ARM64_AMU_EXTN
+#define read_corecnt()	read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)
+#define read_constcnt()	read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)
+#else
+#define read_corecnt()	(0UL)
+#define read_constcnt()	(0UL)
+#endif
 
 #undef pr_fmt
 #define pr_fmt(fmt) "AMU: " fmt

@@ -133,13 +139,10 @@ static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
 static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
 static cpumask_var_t amu_fie_cpus;
 
-/* Initialize counter reference per-cpu variables for the current CPU */
-void init_cpu_freq_invariance_counters(void)
+void update_freq_counters_refs(void)
 {
-	this_cpu_write(arch_core_cycles_prev,
-		       read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0));
-	this_cpu_write(arch_const_cycles_prev,
-		       read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0));
+	this_cpu_write(arch_core_cycles_prev, read_corecnt());
+	this_cpu_write(arch_const_cycles_prev, read_constcnt());
 }
 
 static int validate_cpu_freq_invariance_counters(int cpu)

@@ -280,11 +283,14 @@ void topology_scale_freq_tick(void)
 	if (!cpumask_test_cpu(cpu, amu_fie_cpus))
 		return;
 
-	const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0);
-	core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
 	prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
 	prev_core_cnt = this_cpu_read(arch_core_cycles_prev);
 
+	update_freq_counters_refs();
+
+	const_cnt = this_cpu_read(arch_const_cycles_prev);
+	core_cnt = this_cpu_read(arch_core_cycles_prev);
+
 	if (unlikely(core_cnt <= prev_core_cnt ||
 		     const_cnt <= prev_const_cnt))
 		goto store_and_exit;

@@ -309,4 +315,3 @@ void topology_scale_freq_tick(void)
 	this_cpu_write(arch_core_cycles_prev, core_cnt);
 	this_cpu_write(arch_const_cycles_prev, const_cnt);
 }
-#endif /* CONFIG_ARM64_AMU_EXTN */
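For reference, the counter deltas latched above feed the frequency-invariance
scale factor later in topology_scale_freq_tick() (code not shown in these
hunks). Schematically, and ignoring the fixed-point handling the kernel does
via arch_max_freq_scale, the quantity of interest is the ratio below (the
helper amu_freq_ratio() is illustrative, not part of the patch):

#include <linux/math64.h>
#include <linux/sched.h>

/*
 * Illustrative only: cycles executed at the current CPU frequency per cycle
 * of the fixed-rate reference counter over one tick, as a fixed-point ratio.
 */
static u64 amu_freq_ratio(u64 core_delta, u64 const_delta)
{
        return div64_u64(core_delta << SCHED_CAPACITY_SHIFT, const_delta);
}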