KVM: arm64: Hide kvm_arm_pmu_available behind CONFIG_HW_PERF_EVENTS=y

Move the definition of kvm_arm_pmu_available to pmu-emul.c and, out of
"necessity", hide it behind CONFIG_HW_PERF_EVENTS.  Provide a stub for
the key's wrapper, kvm_arm_support_pmu_v3().  Moving the key's definition
out of perf.c will allow a future commit to delete perf.c entirely.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20211111020738.2512932-16-seanjc@google.com
Sean Christopherson, 2021-11-11 02:07:36 +00:00; committed by Peter Zijlstra
commit be399d824b (parent 7b517831a1)
4 changed files with 16 additions and 9 deletions
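The patch consolidates a standard kernel idiom: a static key declared in a header, defined next to the code that enables it, and replaced by a constant-false stub when the feature is configured out. A condensed sketch of where everything lands after this commit (same identifiers as the hunks below; the file layout is compressed for illustration):

    /* include/kvm/arm_pmu.h */
    #ifdef CONFIG_HW_PERF_EVENTS
    DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

    static __always_inline bool kvm_arm_support_pmu_v3(void)
    {
            /* Patched to a taken branch once the key is enabled. */
            return static_branch_likely(&kvm_arm_pmu_available);
    }
    #else
    static inline bool kvm_arm_support_pmu_v3(void)
    {
            return false;   /* PMU emulation compiled out. */
    }
    #endif

    /* arch/arm64/kvm/pmu-emul.c, built only when CONFIG_HW_PERF_EVENTS=y */
    DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);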

arch/arm64/kernel/image-vars.h

@@ -102,7 +102,9 @@ KVM_NVHE_ALIAS(__stop___kvm_ex_table);
 KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);
 
 /* PMU available static key */
+#ifdef CONFIG_HW_PERF_EVENTS
 KVM_NVHE_ALIAS(kvm_arm_pmu_available);
+#endif
 
 /* Position-independent library routines */
 KVM_NVHE_ALIAS_HYP(clear_page, __pi_clear_page);
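The alias has to become conditional for the same reason the key moves: image-vars.h is pulled into the linker script, and KVM_NVHE_ALIAS() exposes a kernel symbol to the nVHE hypervisor object, whose symbols all live under a __kvm_nvhe_ prefix. Roughly (a sketch; see asm/hyp_image.h for the exact macros):

    /* kvm_nvhe_sym(sym) expands to __kvm_nvhe_##sym */
    #define KVM_NVHE_ALIAS(sym)     kvm_nvhe_sym(sym) = sym;

With CONFIG_HW_PERF_EVENTS=n the key is never defined anywhere, so an unconditional alias would leave the linker resolving an assignment against a symbol that does not exist.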

arch/arm64/kvm/perf.c

@@ -11,8 +11,6 @@
 
 #include <asm/kvm_emulate.h>
 
-DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
-
 void kvm_perf_init(void)
 {
 	kvm_register_perf_callbacks(NULL);

arch/arm64/kvm/pmu-emul.c

@@ -14,6 +14,8 @@
 #include <kvm/arm_pmu.h>
 #include <kvm/arm_vgic.h>
 
+DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
+
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
 static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
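This hunk is where the "necessity" quoted in the commit message comes from: pmu-emul.c is only compiled into KVM when CONFIG_HW_PERF_EVENTS=y (the arch/arm64/kvm/Makefile gates it with a line along the lines of kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o). Once the key's definition lives here, it simply does not exist in =n builds, so every other reference to it must be compiled out as well.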

include/kvm/arm_pmu.h

@@ -13,13 +13,6 @@
 #define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
 #define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
 
-DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
-
-static __always_inline bool kvm_arm_support_pmu_v3(void)
-{
-	return static_branch_likely(&kvm_arm_pmu_available);
-}
-
 #ifdef CONFIG_HW_PERF_EVENTS
 
 struct kvm_pmc {
@@ -36,6 +29,13 @@ struct kvm_pmu {
 	struct irq_work overflow_work;
 };
 
+DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
+
+static __always_inline bool kvm_arm_support_pmu_v3(void)
+{
+	return static_branch_likely(&kvm_arm_pmu_available);
+}
+
 #define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
@@ -65,6 +65,11 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
 struct kvm_pmu {
 };
 
+static inline bool kvm_arm_support_pmu_v3(void)
+{
+	return false;
+}
+
 #define kvm_arm_pmu_irq_initialized(v)	(false)
 static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
 					    u64 select_idx)
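With the real implementation and the stub now living behind the same #ifdef, callers can test the wrapper unconditionally; in CONFIG_HW_PERF_EVENTS=n builds the stub is a compile-time false and the guarded code folds away. A hypothetical caller (example_vcpu_setup() is illustrative, not from the patch):

    static void example_vcpu_setup(struct kvm_vcpu *vcpu)
    {
            /* The stub returns false when PMU support is compiled out,
             * so the compiler can discard everything below the check. */
            if (!kvm_arm_support_pmu_v3())
                    return;

            /* ... PMU emulation setup would go here ... */
    }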