KVM: x86/pmu: Update sample period in pmc_write_counter()

Update a PMC's sample period in pmc_write_counter() to deduplicate code
across all callers of pmc_write_counter().  Opportunistically move
pmc_write_counter() into pmu.c now that it's doing more work.  WRMSR isn't
such a hot path that an extra CALL+RET pair will be problematic, and the
order of function definitions needs to be changed anyway, i.e. now is a
convenient time to eat the churn.

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20231103230541.352265-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
commit 89acf1237b
parent ec61b2306d
Author: Sean Christopherson <seanjc@google.com>
Date:   2023-11-03 16:05:40 -07:00

4 changed files with 28 additions and 27 deletions
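
For background, pmc_read_counter() returns pmc->counter plus whatever the
backing perf event has counted since it was programmed, which is why
pmc_write_counter() folds a guest write into a delta rather than assigning
pmc->counter directly.  A standalone sketch of that invariant (simplified
model, not KVM code; the 48-bit width and the names are illustrative):

#include <stdio.h>
#include <stdint.h>

#define MASK ((1ULL << 48) - 1)	/* e.g. a 48-bit GP counter */

static uint64_t sw_counter;	/* models pmc->counter */
static uint64_t hw_count;	/* models the running perf event's count */

static uint64_t read_counter(void)
{
	/* Architectural value = saved value + live perf count. */
	return (sw_counter + hw_count) & MASK;
}

static void write_counter(uint64_t val)
{
	/* Mirrors pmc_write_counter(): compensate for the live count. */
	sw_counter += val - read_counter();
	sw_counter &= MASK;
}

int main(void)
{
	hw_count = 1000;	/* the perf event has been counting */
	write_counter(42);	/* guest WRMSR of 42 */
	printf("%llu\n", (unsigned long long)read_counter());	/* prints 42 */
	return 0;
}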

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c

@@ -161,6 +161,15 @@ static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
 	return 1;
 }
 
+static u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
+{
+	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
+
+	if (!sample_period)
+		sample_period = pmc_bitmask(pmc) + 1;
+	return sample_period;
+}
+
 static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
 				 bool exclude_user, bool exclude_kernel,
 				 bool intr)
@@ -268,6 +277,24 @@ static void pmc_stop_counter(struct kvm_pmc *pmc)
 	}
 }
 
+static void pmc_update_sample_period(struct kvm_pmc *pmc)
+{
+	if (!pmc->perf_event || pmc->is_paused ||
+	    !is_sampling_event(pmc->perf_event))
+		return;
+
+	perf_event_period(pmc->perf_event,
+			  get_sample_period(pmc, pmc->counter));
+}
+
+void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
+{
+	pmc->counter += val - pmc_read_counter(pmc);
+	pmc->counter &= pmc_bitmask(pmc);
+	pmc_update_sample_period(pmc);
+}
+EXPORT_SYMBOL_GPL(pmc_write_counter);
+
 static int filter_cmp(const void *pa, const void *pb, u64 mask)
 {
 	u64 a = *(u64 *)pa & mask;
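
The period math that pmc_write_counter() now refreshes is the
two's-complement distance to overflow: (-counter) & pmc_bitmask(pmc) is how
many events remain before an N-bit counter wraps, with a counter of zero
meaning a full-range period.  A worked standalone example (plain C, not KVM
code; assumes a 48-bit counter):

#include <stdio.h>
#include <stdint.h>

#define MASK ((1ULL << 48) - 1)	/* pmc_bitmask() for a 48-bit PMC */

static uint64_t sample_period(uint64_t counter)
{
	uint64_t period = (-counter) & MASK;	/* events until overflow */

	return period ? period : MASK + 1;	/* 0 => full 2^48 range */
}

int main(void)
{
	/* Counter armed 16 events shy of overflow => period of 16. */
	printf("%llu\n", (unsigned long long)sample_period(MASK - 15));
	/* Counter of 0 => period of 2^48. */
	printf("%llu\n", (unsigned long long)sample_period(0));
	return 0;
}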

diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h

@@ -74,11 +74,7 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
 	return counter & pmc_bitmask(pmc);
 }
 
-static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
-{
-	pmc->counter += val - pmc_read_counter(pmc);
-	pmc->counter &= pmc_bitmask(pmc);
-}
+void pmc_write_counter(struct kvm_pmc *pmc, u64 val);
 
 static inline bool pmc_is_gp(struct kvm_pmc *pmc)
 {
@@ -128,25 +124,6 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
 	return NULL;
 }
 
-static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
-{
-	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
-
-	if (!sample_period)
-		sample_period = pmc_bitmask(pmc) + 1;
-	return sample_period;
-}
-
-static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
-{
-	if (!pmc->perf_event || pmc->is_paused ||
-	    !is_sampling_event(pmc->perf_event))
-		return;
-
-	perf_event_period(pmc->perf_event,
-			  get_sample_period(pmc, pmc->counter));
-}
-
 static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c

@@ -161,7 +161,6 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
 	if (pmc) {
 		pmc_write_counter(pmc, data);
-		pmc_update_sample_period(pmc);
 		return 0;
 	}
 	/* MSR_EVNTSELn */

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c

@@ -437,11 +437,9 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
 				data = (s64)(s32)data;
 			pmc_write_counter(pmc, data);
-			pmc_update_sample_period(pmc);
 			break;
 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
 			pmc_write_counter(pmc, data);
-			pmc_update_sample_period(pmc);
 			break;
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
 			reserved_bits = pmu->reserved_bits;
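
One subtlety visible in the Intel hunk above: without MSR_PMC_FULL_WIDTH_BIT,
a legacy WRMSR carries only 32 significant bits, and the (s64)(s32) cast
sign-extends them before pmc_write_counter() applies the counter mask.  A
quick standalone illustration (plain C, not KVM code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t data = 0x80000000;			/* bit 31 set */

	data = (uint64_t)(int64_t)(int32_t)data;	/* the (s64)(s32) cast */
	printf("%#llx\n", (unsigned long long)data);	/* 0xffffffff80000000 */
	return 0;
}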