mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-27 12:57:53 +00:00
63f4b21041
KVM/s390, KVM/x86 and common infrastructure changes for 5.20 x86: * Permit guests to ignore single-bit ECC errors * Fix races in gfn->pfn cache refresh; do not pin pages tracked by the cache * Intel IPI virtualization * Allow getting/setting pending triple fault with KVM_GET/SET_VCPU_EVENTS * PEBS virtualization * Simplify PMU emulation by just using PERF_TYPE_RAW events * More accurate event reinjection on SVM (avoid retrying instructions) * Allow getting/setting the state of the speaker port data bit * Refuse starting the kvm-intel module if VM-Entry/VM-Exit controls are inconsistent * "Notify" VM exit (detect microarchitectural hangs) for Intel * Cleanups for MCE MSR emulation s390: * add an interface to provide a hypervisor dump for secure guests * improve selftests to use TAP interface * enable interpretive execution of zPCI instructions (for PCI passthrough) * First part of deferred teardown * CPU Topology * PV attestation * Minor fixes Generic: * new selftests API using struct kvm_vcpu instead of a (vm, id) tuple x86: * Use try_cmpxchg64 instead of cmpxchg64 * Bugfixes * Ignore benign host accesses to PMU MSRs when PMU is disabled * Allow disabling KVM's "MONITOR/MWAIT are NOPs!" behavior * x86/MMU: Allow NX huge pages to be disabled on a per-vm basis * Port eager page splitting to shadow MMU as well * Enable CMCI capability by default and handle injected UCNA errors * Expose pid of vcpu threads in debugfs * x2AVIC support for AMD * cleanup PIO emulation * Fixes for LLDT/LTR emulation * Don't require refcounted "struct page" to create huge SPTEs x86 cleanups: * Use separate namespaces for guest PTEs and shadow PTEs bitmasks * PIO emulation * Reorganize rmap API, mostly around rmap destruction * Do not workaround very old KVM bugs for L0 that runs with nesting enabled * new selftests API for CPUID
125 lines · 2.7 KiB · C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
|
|
#ifndef __KVM_TYPES_H__
|
|
#define __KVM_TYPES_H__
|
|
|
|
struct kvm;
|
|
struct kvm_async_pf;
|
|
struct kvm_device_ops;
|
|
struct kvm_interrupt;
|
|
struct kvm_irq_routing_table;
|
|
struct kvm_memory_slot;
|
|
struct kvm_one_reg;
|
|
struct kvm_run;
|
|
struct kvm_userspace_memory_region;
|
|
struct kvm_vcpu;
|
|
struct kvm_vcpu_init;
|
|
struct kvm_memslots;
|
|
|
|
enum kvm_mr_change;
|
|
|
|
#include <linux/bits.h>
|
|
#include <linux/mutex.h>
|
|
#include <linux/types.h>
|
|
#include <linux/spinlock_types.h>
|
|
|
|
#include <asm/kvm_types.h>
|
|
|
|
/*
|
|
* Address types:
|
|
*
|
|
* gva - guest virtual address
|
|
* gpa - guest physical address
|
|
* gfn - guest frame number
|
|
* hva - host virtual address
|
|
* hpa - host physical address
|
|
* hfn - host frame number
|
|
*/
|
|
|
|
typedef unsigned long gva_t;
|
|
typedef u64 gpa_t;
|
|
typedef u64 gfn_t;
|
|
|
|
#define GPA_INVALID (~(gpa_t)0)
|
|
|
|
typedef unsigned long hva_t;
|
|
typedef u64 hpa_t;
|
|
typedef u64 hfn_t;
|
|
|
|
typedef hfn_t kvm_pfn_t;
|
|
|
|
enum pfn_cache_usage {
|
|
KVM_GUEST_USES_PFN = BIT(0),
|
|
KVM_HOST_USES_PFN = BIT(1),
|
|
KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
|
|
};
|
|
|
|
struct gfn_to_hva_cache {
|
|
u64 generation;
|
|
gpa_t gpa;
|
|
unsigned long hva;
|
|
unsigned long len;
|
|
struct kvm_memory_slot *memslot;
|
|
};
|
|
|
|
struct gfn_to_pfn_cache {
|
|
u64 generation;
|
|
gpa_t gpa;
|
|
unsigned long uhva;
|
|
struct kvm_memory_slot *memslot;
|
|
struct kvm_vcpu *vcpu;
|
|
struct list_head list;
|
|
rwlock_t lock;
|
|
struct mutex refresh_lock;
|
|
void *khva;
|
|
kvm_pfn_t pfn;
|
|
enum pfn_cache_usage usage;
|
|
bool active;
|
|
bool valid;
|
|
};
|
|
|
|
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
|
|
/*
|
|
* Memory caches are used to preallocate memory ahead of various MMU flows,
|
|
* e.g. page fault handlers. Gracefully handling allocation failures deep in
|
|
* MMU flows is problematic, as is triggering reclaim, I/O, etc... while
|
|
* holding MMU locks. Note, these caches act more like prefetch buffers than
|
|
* classical caches, i.e. objects are not returned to the cache on being freed.
|
|
*
|
|
* The @capacity field and @objects array are lazily initialized when the cache
|
|
* is topped up (__kvm_mmu_topup_memory_cache()).
|
|
*/
|
|
struct kvm_mmu_memory_cache {
|
|
int nobjs;
|
|
gfp_t gfp_zero;
|
|
gfp_t gfp_custom;
|
|
struct kmem_cache *kmem_cache;
|
|
int capacity;
|
|
void **objects;
|
|
};
|
|
#endif
|
|
|
|
#define HALT_POLL_HIST_COUNT 32
|
|
|
|
struct kvm_vm_stat_generic {
|
|
u64 remote_tlb_flush;
|
|
u64 remote_tlb_flush_requests;
|
|
};
|
|
|
|
struct kvm_vcpu_stat_generic {
|
|
u64 halt_successful_poll;
|
|
u64 halt_attempted_poll;
|
|
u64 halt_poll_invalid;
|
|
u64 halt_wakeup;
|
|
u64 halt_poll_success_ns;
|
|
u64 halt_poll_fail_ns;
|
|
u64 halt_wait_ns;
|
|
u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT];
|
|
u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];
|
|
u64 halt_wait_hist[HALT_POLL_HIST_COUNT];
|
|
u64 blocking;
|
|
};
|
|
|
|
#define KVM_STATS_NAME_SIZE 48
|
|
|
|
#endif /* __KVM_TYPES_H__ */
|