2019-06-04 08:11:32 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2011-11-10 12:57:22 +00:00
|
|
|
/*
|
2012-06-28 07:23:08 +00:00
|
|
|
* Kernel-based Virtual Machine -- Performance Monitoring Unit support
|
2011-11-10 12:57:22 +00:00
|
|
|
*
|
2015-06-19 13:45:05 +00:00
|
|
|
* Copyright 2015 Red Hat, Inc. and/or its affiliates.
|
2011-11-10 12:57:22 +00:00
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Avi Kivity <avi@redhat.com>
|
|
|
|
* Gleb Natapov <gleb@redhat.com>
|
2015-06-19 13:45:05 +00:00
|
|
|
* Wei Huang <wei@redhat.com>
|
2011-11-10 12:57:22 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/kvm_host.h>
|
|
|
|
#include <linux/perf_event.h>
|
2014-08-20 10:25:52 +00:00
|
|
|
#include <asm/perf_event.h>
|
2011-11-10 12:57:22 +00:00
|
|
|
#include "x86.h"
|
|
|
|
#include "cpuid.h"
|
|
|
|
#include "lapic.h"
|
2015-06-19 11:54:23 +00:00
|
|
|
#include "pmu.h"
|
2011-11-10 12:57:22 +00:00
|
|
|
|
2019-07-18 18:38:18 +00:00
|
|
|
/* This is enough to filter the vast majority of currently defined events. */
|
|
|
|
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
|
2019-07-11 01:25:15 +00:00
|
|
|
|
2015-06-19 13:45:05 +00:00
|
|
|
/* NOTE:
|
|
|
|
* - Each perf counter is defined as "struct kvm_pmc";
|
|
|
|
* - There are two types of perf counters: general purpose (gp) and fixed.
|
|
|
|
* gp counters are stored in gp_counters[] and fixed counters are stored
|
|
|
|
* in fixed_counters[] respectively. Both of them are part of "struct
|
|
|
|
* kvm_pmu";
|
|
|
|
* - pmu.c understands the difference between gp counters and fixed counters.
|
|
|
|
* However AMD doesn't support fixed-counters;
|
|
|
|
* - There are three types of index to access perf counters (PMC):
|
|
|
|
* 1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
|
|
|
|
* has MSR_K7_PERFCTRn.
|
|
|
|
* 2. MSR Index (named idx): This normally is used by RDPMC instruction.
|
|
|
|
* For instance AMD RDPMC instruction uses 0000_0003h in ECX to access
|
|
|
|
* C001_0007h (MSR_K7_PERCTR3). Intel has a similar mechanism, except
|
|
|
|
* that it also supports fixed counters. idx can be used to as index to
|
|
|
|
* gp and fixed counters.
|
|
|
|
* 3. Global PMC Index (named pmc): pmc is an index specific to PMU
|
|
|
|
* code. Each pmc, stored in kvm_pmc.idx field, is unique across
|
|
|
|
* all perf counters (both gp and fixed). The mapping relationship
|
|
|
|
* between pmc and perf counters is as the following:
|
|
|
|
* * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
|
|
|
|
* [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
|
|
|
|
* * AMD: [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
|
|
|
|
*/
|
2011-11-10 12:57:22 +00:00
|
|
|
|
2015-06-19 11:44:45 +00:00
|
|
|
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
|
2011-11-10 12:57:22 +00:00
|
|
|
{
|
2015-06-19 12:00:33 +00:00
|
|
|
struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
|
|
|
|
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
|
2011-11-10 12:57:22 +00:00
|
|
|
|
2015-06-19 11:44:45 +00:00
|
|
|
kvm_pmu_deliver_pmi(vcpu);
|
2011-11-10 12:57:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void kvm_perf_overflow(struct perf_event *perf_event,
|
|
|
|
struct perf_sample_data *data,
|
|
|
|
struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
struct kvm_pmc *pmc = perf_event->overflow_handler_context;
|
2015-06-19 12:00:33 +00:00
|
|
|
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
|
2015-06-19 12:15:28 +00:00
|
|
|
|
2019-10-21 10:55:04 +00:00
|
|
|
if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
|
2014-04-18 00:35:08 +00:00
|
|
|
__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
|
|
|
|
kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
|
|
|
|
}
|
2011-11-10 12:57:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void kvm_perf_overflow_intr(struct perf_event *perf_event,
|
2015-06-19 12:15:28 +00:00
|
|
|
struct perf_sample_data *data,
|
|
|
|
struct pt_regs *regs)
|
2011-11-10 12:57:22 +00:00
|
|
|
{
|
|
|
|
struct kvm_pmc *pmc = perf_event->overflow_handler_context;
|
2015-06-19 12:00:33 +00:00
|
|
|
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
|
2015-06-19 12:15:28 +00:00
|
|
|
|
2019-10-21 10:55:04 +00:00
|
|
|
if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
|
2014-04-18 00:35:08 +00:00
|
|
|
__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
|
2011-11-10 12:57:22 +00:00
|
|
|
kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
|
2015-06-19 12:15:28 +00:00
|
|
|
|
2011-11-10 12:57:22 +00:00
|
|
|
/*
|
|
|
|
* Inject PMI. If vcpu was in a guest mode during NMI PMI
|
|
|
|
* can be ejected on a guest mode re-entry. Otherwise we can't
|
|
|
|
* be sure that vcpu wasn't executing hlt instruction at the
|
2015-06-19 12:15:28 +00:00
|
|
|
* time of vmexit and is not going to re-enter guest mode until
|
2011-11-10 12:57:22 +00:00
|
|
|
* woken up. So we should wake it, but this is impossible from
|
|
|
|
* NMI context. Do it from irq work instead.
|
|
|
|
*/
|
|
|
|
if (!kvm_is_in_guest())
|
2015-06-19 12:00:33 +00:00
|
|
|
irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
|
2011-11-10 12:57:22 +00:00
|
|
|
else
|
|
|
|
kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-19 11:44:45 +00:00
|
|
|
/*
 * Create and attach a host perf_event backing the given vPMC.
 * On success the event is stored in pmc->perf_event and the pending
 * reprogram bit for this counter is cleared; on failure the vPMC is
 * simply left without a backing event (counting silently stops).
 */
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
		attr.config |= HSW_IN_TX_CHECKPOINTED;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return;
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
}
|
|
|
|
|
KVM: x86/vPMU: Reuse perf_event to avoid unnecessary pmc_reprogram_counter
The perf_event_create_kernel_counter() in the pmc_reprogram_counter() is
a heavyweight and high-frequency operation, especially when host disables
the watchdog (maximum 21000000 ns) which leads to an unacceptable latency
of the guest NMI handler. It limits the use of vPMUs in the guest.
When a vPMC is fully enabled, the legacy reprogram_*_counter() would stop
and release its existing perf_event (if any) every time EVEN in most cases
almost the same requested perf_event will be created and configured again.
For each vPMC, if the reuqested config ('u64 eventsel' for gp and 'u8 ctrl'
for fixed) is the same as its current config AND a new sample period based
on pmc->counter is accepted by host perf interface, the current event could
be reused safely as a new created one does. Otherwise, do release the
undesirable perf_event and reprogram a new one as usual.
It's light-weight to call pmc_pause_counter (disable, read and reset event)
and pmc_resume_counter (recalibrate period and re-enable event) as guest
expects instead of release-and-create again on any condition. Compared to
use the filterable event->attr or hw.config, a new 'u64 current_config'
field is added to save the last original programed config for each vPMC.
Based on this implementation, the number of calls to pmc_reprogram_counter
is reduced by ~82.5% for a gp sampling event and ~99.9% for a fixed event.
In the usage of multiplexing perf sampling mode, the average latency of the
guest NMI handler is reduced from 104923 ns to 48393 ns (~2.16x speed up).
If host disables watchdog, the minimum latecy of guest NMI handler could be
speed up at ~3413x (from 20407603 to 5979 ns) and at ~786x in the average.
Suggested-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Like Xu <like.xu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2019-10-27 10:52:42 +00:00
|
|
|
static void pmc_pause_counter(struct kvm_pmc *pmc)
|
|
|
|
{
|
|
|
|
u64 counter = pmc->counter;
|
|
|
|
|
|
|
|
if (!pmc->perf_event)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* update counter, reset event value to avoid redundant accumulation */
|
|
|
|
counter += perf_event_pause(pmc->perf_event, true);
|
|
|
|
pmc->counter = counter & pmc_bitmask(pmc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool pmc_resume_counter(struct kvm_pmc *pmc)
|
|
|
|
{
|
|
|
|
if (!pmc->perf_event)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* recalibrate sample period and check if it's accepted by perf core */
|
|
|
|
if (perf_event_period(pmc->perf_event,
|
2020-02-22 02:34:13 +00:00
|
|
|
get_sample_period(pmc, pmc->counter)))
|
KVM: x86/vPMU: Reuse perf_event to avoid unnecessary pmc_reprogram_counter
The perf_event_create_kernel_counter() in the pmc_reprogram_counter() is
a heavyweight and high-frequency operation, especially when host disables
the watchdog (maximum 21000000 ns) which leads to an unacceptable latency
of the guest NMI handler. It limits the use of vPMUs in the guest.
When a vPMC is fully enabled, the legacy reprogram_*_counter() would stop
and release its existing perf_event (if any) every time EVEN in most cases
almost the same requested perf_event will be created and configured again.
For each vPMC, if the reuqested config ('u64 eventsel' for gp and 'u8 ctrl'
for fixed) is the same as its current config AND a new sample period based
on pmc->counter is accepted by host perf interface, the current event could
be reused safely as a new created one does. Otherwise, do release the
undesirable perf_event and reprogram a new one as usual.
It's light-weight to call pmc_pause_counter (disable, read and reset event)
and pmc_resume_counter (recalibrate period and re-enable event) as guest
expects instead of release-and-create again on any condition. Compared to
use the filterable event->attr or hw.config, a new 'u64 current_config'
field is added to save the last original programed config for each vPMC.
Based on this implementation, the number of calls to pmc_reprogram_counter
is reduced by ~82.5% for a gp sampling event and ~99.9% for a fixed event.
In the usage of multiplexing perf sampling mode, the average latency of the
guest NMI handler is reduced from 104923 ns to 48393 ns (~2.16x speed up).
If host disables watchdog, the minimum latecy of guest NMI handler could be
speed up at ~3413x (from 20407603 to 5979 ns) and at ~786x in the average.
Suggested-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Like Xu <like.xu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2019-10-27 10:52:42 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
/* reuse perf_event to serve as pmc_reprogram_counter() does*/
|
|
|
|
perf_event_enable(pmc->perf_event);
|
|
|
|
|
|
|
|
clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-06-19 13:45:05 +00:00
|
|
|
/*
 * Reprogram a general-purpose vPMC from its guest EVENTSEL MSR value:
 * pause the current event, apply the VM's PMU event filter, map the
 * event to a generic perf hardware event when possible (raw otherwise),
 * and resume or re-create the backing perf_event.
 */
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;
	struct kvm *kvm = pmc->vcpu->kvm;
	struct kvm_pmu_event_filter *filter;
	int i;
	bool allow_event = true;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_pause_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		/* Look the event up in the filter list (i == nevents: miss). */
		for (i = 0; i < filter->nevents; i++)
			if (filter->events[i] ==
			    (eventsel & AMD64_RAW_EVENT_MASK_NB))
				break;
		if (filter->action == KVM_PMU_EVENT_ALLOW)
			allow_event = (i < filter->nevents);
		else if (filter->action == KVM_PMU_EVENT_DENY)
			allow_event = (i == filter->nevents);
	}
	if (!allow_event)
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	/* Modifier-free events may map to a generic perf hw event. */
	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),
							      event_select,
							      unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	/* Same config as before and period accepted: just resume. */
	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = eventsel;
	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);
|
2011-11-10 12:57:22 +00:00
|
|
|
|
2015-06-19 13:45:05 +00:00
|
|
|
/*
 * Reprogram a fixed vPMC from its 4-bit control field: bits 0-1 select
 * the enable/ring level, bit 3 requests a PMI on overflow. Applies the
 * VM's fixed-counter filter bitmap before (re)creating the perf_event.
 */
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;
	struct kvm_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;

	pmc_pause_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		bool in_bitmap = test_bit(idx,
				(ulong *)&filter->fixed_counter_bitmap);

		if (filter->action == KVM_PMU_EVENT_DENY && in_bitmap)
			return;
		if (filter->action == KVM_PMU_EVENT_ALLOW && !in_bitmap)
			return;
	}

	/* Same config as before and period accepted: just resume. */
	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = (u64)ctrl;
	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops.pmu_ops->find_fixed_event(idx),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
|
2011-11-10 12:57:22 +00:00
|
|
|
|
2015-06-19 13:45:05 +00:00
|
|
|
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
|
2011-11-10 12:57:22 +00:00
|
|
|
{
|
2020-03-21 20:26:00 +00:00
|
|
|
struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
|
2011-11-10 12:57:22 +00:00
|
|
|
|
|
|
|
if (!pmc)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (pmc_is_gp(pmc))
|
|
|
|
reprogram_gp_counter(pmc, pmc->eventsel);
|
|
|
|
else {
|
2015-06-19 12:15:28 +00:00
|
|
|
int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
|
|
|
|
u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);
|
|
|
|
|
|
|
|
reprogram_fixed_counter(pmc, ctrl, idx);
|
2011-11-10 12:57:22 +00:00
|
|
|
}
|
|
|
|
}
|
2015-06-19 13:45:05 +00:00
|
|
|
EXPORT_SYMBOL_GPL(reprogram_counter);
|
2011-11-10 12:57:22 +00:00
|
|
|
|
2015-06-19 13:51:47 +00:00
|
|
|
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
|
|
|
|
int bit;
|
|
|
|
|
2019-10-21 10:55:04 +00:00
|
|
|
for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
|
2020-03-21 20:26:00 +00:00
|
|
|
struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);
|
2015-06-19 13:51:47 +00:00
|
|
|
|
|
|
|
if (unlikely(!pmc || !pmc->perf_event)) {
|
2019-10-21 10:55:04 +00:00
|
|
|
clear_bit(bit, pmu->reprogram_pmi);
|
2015-06-19 13:51:47 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
reprogram_counter(pmu, bit);
|
|
|
|
}
|
2019-10-27 10:52:43 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Unused perf_events are only released if the corresponding MSRs
|
|
|
|
* weren't accessed during the last vCPU time slice. kvm_arch_sched_in
|
|
|
|
* triggers KVM_REQ_PMU if cleanup is needed.
|
|
|
|
*/
|
|
|
|
if (unlikely(pmu->need_cleanup))
|
|
|
|
kvm_pmu_cleanup(vcpu);
|
2015-06-19 13:51:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* check if idx is a valid index to access PMU */
|
2019-10-27 10:52:40 +00:00
|
|
|
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
|
2015-06-19 13:51:47 +00:00
|
|
|
{
|
2020-03-21 20:26:00 +00:00
|
|
|
return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
|
2015-06-19 14:16:59 +00:00
|
|
|
}
|
|
|
|
|
2018-03-12 11:12:53 +00:00
|
|
|
/* Is this one of the pseudo-PMCs of the VMware RDPMC backdoor? */
bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	default:
		return false;
	}
}
|
|
|
|
|
|
|
|
/*
 * Serve an RDPMC on one of the VMware backdoor pseudo-counters.
 * Returns 0 with *data filled on success, 1 for an unknown index.
 */
static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}
|
|
|
|
|
2015-06-19 14:16:59 +00:00
|
|
|
/*
 * Emulate RDPMC. Returns 0 with *data set on success, 1 to signal that
 * the caller should inject #GP (no PMU, bad index, or CPL/CR4.PCE check
 * failed).
 */
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;	/* fast mode: low 32 bits only */

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	/* In protected mode, non-ring-0 RDPMC requires CR4.PCE. */
	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}
|
|
|
|
|
|
|
|
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2021-02-01 05:10:36 +00:00
|
|
|
if (lapic_in_kernel(vcpu)) {
|
|
|
|
if (kvm_x86_ops.pmu_ops->deliver_pmi)
|
|
|
|
kvm_x86_ops.pmu_ops->deliver_pmi(vcpu);
|
2015-06-19 13:51:47 +00:00
|
|
|
kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
|
2021-02-01 05:10:36 +00:00
|
|
|
}
|
2015-06-19 13:51:47 +00:00
|
|
|
}
|
|
|
|
|
2015-06-19 11:44:45 +00:00
|
|
|
/*
 * Return true if 'msr' belongs to the vPMU: either it maps directly to a
 * counter, or the vendor implementation otherwise claims it.
 */
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	if (kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr))
		return true;

	return kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
}
|
|
|
|
|
2019-10-27 10:52:43 +00:00
|
|
|
/*
 * Record that the counter addressed by 'msr' (if any) was touched by the
 * guest, so the lazy-release logic in kvm_pmu_cleanup() keeps its
 * perf_event alive for another time slice.
 */
static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);

	if (!pmc)
		return;

	__set_bit(pmc->idx, vcpu_to_pmu(vcpu)->pmc_in_use);
}
|
|
|
|
|
2020-05-29 07:43:44 +00:00
|
|
|
/* Read a PMU MSR; defers entirely to the vendor (Intel/AMD) implementation. */
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
}
|
|
|
|
|
2013-03-28 16:18:35 +00:00
|
|
|
/*
 * Write a PMU MSR.  The write marks the targeted counter (if any) as
 * in-use before handing the actual emulation to the vendor implementation,
 * so lazy cleanup won't release a counter the guest is actively programming.
 */
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
	return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
}
|
|
|
|
|
2015-06-19 12:15:28 +00:00
|
|
|
/*
 * Refresh the vPMU configuration from current guest state.  This is
 * generally called when underlying settings change (such as changes of
 * the PMU CPUID leaves by the VMM), which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops.pmu_ops->refresh(vcpu);
}
|
|
|
|
|
|
|
|
/* Reset the vPMU to its power-on state. */
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	/* Wait out any in-flight PMI work before tearing down counters. */
	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops.pmu_ops->reset(vcpu);
}
|
|
|
|
|
2015-06-19 13:51:47 +00:00
|
|
|
/* One-time vPMU initialization at vCPU creation. */
void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	/* Start from a zeroed state before the vendor hook fills it in. */
	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops.pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	pmu->event_count = 0;
	pmu->need_cleanup = false;
	kvm_pmu_refresh(vcpu);
}
|
|
|
|
|
2019-10-27 10:52:43 +00:00
|
|
|
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
|
|
|
|
{
|
|
|
|
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
|
|
|
|
|
|
|
|
if (pmc_is_fixed(pmc))
|
|
|
|
return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
|
|
|
|
pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;
|
|
|
|
|
|
|
|
return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	/* bitmask = valid counters that were NOT touched (all_valid & ~in_use). */
	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

		/* Only drop events the guest hasn't (re-)enabled meanwhile. */
		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	/* Optional vendor-specific cleanup hook. */
	if (kvm_x86_ops.pmu_ops->cleanup)
		kvm_x86_ops.pmu_ops->cleanup(vcpu);

	/* Start accumulating usage afresh for the next time slice. */
	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}
|
|
|
|
|
2015-06-19 13:51:47 +00:00
|
|
|
/* Tear down the vPMU at vCPU destruction; reset releases all counters. */
void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}
|
2019-07-11 01:25:15 +00:00
|
|
|
|
|
|
|
/*
 * Handle the KVM_SET_PMU_EVENT_FILTER ioctl: install a new allow/deny
 * event filter for the VM's vPMU, replacing (and freeing) any previous
 * filter.  Returns 0 on success or a negative errno.
 */
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter tmp, *filter;
	size_t size;
	int r;

	/* First fetch: read just the fixed-size header to validate it. */
	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	/* No flags are defined yet; reject anything nonzero. */
	if (tmp.flags != 0)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	/* Second fetch: header plus the flexible events[] array. */
	r = -EFAULT;
	if (copy_from_user(filter, argp, size))
		goto cleanup;

	/* Ensure nevents can't be changed between the user copies. */
	*filter = tmp;

	mutex_lock(&kvm->lock);
	/* Publish the new filter; 'filter' now holds the old one (or NULL). */
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);

	/* Wait for readers of the old filter before freeing it below. */
	synchronize_srcu_expedited(&kvm->srcu);
	r = 0;
cleanup:
	kfree(filter);
	return r;
}
|