From 9ea597286570b50f94af3f785d099e5a5859a745 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 25 Sep 2018 19:16:16 -0400 Subject: [PATCH 001/204] KVM: s390: vsie: simulate VCPU SIE entry/exit VCPU requests and VCPU blocking right now don't take care of the vSIE (as it was not necessary until now). But we want to have synchronous VCPU requests that will also be handled before running the vSIE again. So let's simulate a SIE entry of the VCPU when calling the sie during vSIE handling and check for PROG_ flags. The existing infrastructure (e.g. exit_sie()) will then detect that the SIE (in form of the vSIE) is running and properly kick the vSIE CPU, resulting in it leaving the vSIE loop and therefore the vSIE interception handler, allowing it to handle VCPU requests. E.g. if we want to modify the crycb of the VCPU and make sure that any masks also get applied to the VSIE crycb shadow (which uses masks from the VCPU crycb), we will need a way to hinder the vSIE from running and make sure to process the updated crycb before reentering the vSIE again. Signed-off-by: David Hildenbrand Signed-off-by: Tony Krowiak Reviewed-by: Pierre Morel Reviewed-by: Cornelia Huck Reviewed-by: Janosch Frank Reviewed-by: Christian Borntraeger Message-Id: <20180925231641.4954-2-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 9 ++++++++- arch/s390/kvm/kvm-s390.h | 1 + arch/s390/kvm/vsie.c | 21 +++++++++++++++++---- 3 files changed, 26 insertions(+), 5 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f69333fd2fa3..0b5aff0e3984 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2768,18 +2768,25 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu) exit_sie(vcpu); } +bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu) +{ + return atomic_read(&vcpu->arch.sie_block->prog20) & + (PROG_BLOCK_SIE | PROG_REQUEST); +} + static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) { atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); } /* - * Kick a guest cpu out of SIE and wait until SIE is not running. + * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running. * If the CPU is not running (e.g. waiting as idle) the function will * return immediately. 
*/ void exit_sie(struct kvm_vcpu *vcpu) { kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT); + kvm_s390_vsie_kick(vcpu); while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) cpu_relax(); } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 981e3ba97461..1f6e36cdce0d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -290,6 +290,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu); +bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu); void exit_sie(struct kvm_vcpu *vcpu); void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu); int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index a2b28cd1e3fe..12b970701c26 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -830,7 +830,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; int guest_bp_isolation; - int rc; + int rc = 0; handle_last_fault(vcpu, vsie_page); @@ -858,7 +858,18 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) guest_enter_irqoff(); local_irq_enable(); - rc = sie64a(scb_s, vcpu->run->s.regs.gprs); + /* + * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking + * and VCPU requests also hinder the vSIE from running and lead + * to an immediate exit. kvm_s390_vsie_kick() has to be used to + * also kick the vSIE. + */ + vcpu->arch.sie_block->prog0c |= PROG_IN_SIE; + barrier(); + if (!kvm_s390_vcpu_sie_inhibited(vcpu)) + rc = sie64a(scb_s, vcpu->run->s.regs.gprs); + barrier(); + vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE; local_irq_disable(); guest_exit_irqoff(); @@ -1005,7 +1016,8 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) if (rc == -EAGAIN) rc = 0; if (rc || scb_s->icptcode || signal_pending(current) || - kvm_s390_vcpu_has_irq(vcpu, 0)) + kvm_s390_vcpu_has_irq(vcpu, 0) || + kvm_s390_vcpu_sie_inhibited(vcpu)) break; } @@ -1122,7 +1134,8 @@ int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu) if (unlikely(scb_addr & 0x1ffUL)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0)) + if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) || + kvm_s390_vcpu_sie_inhibited(vcpu)) return 0; vsie_page = get_vsie_page(vcpu->kvm, scb_addr); From 3194cdb71190a74d46ae456efef10ecfc6f1e062 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 25 Sep 2018 19:16:17 -0400 Subject: [PATCH 002/204] KVM: s390: introduce and use KVM_REQ_VSIE_RESTART When we change the crycb (or execution controls), we also have to make sure that the vSIE shadow datastructures properly consider the changed values before rerunning the vSIE. We can achieve that by simply using a VCPU request now. This has to be a synchronous request (== handled before entering the (v)SIE again). The request will make sure that the vSIE handler is left, and that the request will be processed (NOP), therefore forcing a reload of all vSIE data (including rebuilding the crycb) when re-entering the vSIE interception handler the next time. 
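Taken together with the previous patch, the net effect is a simple recipe for forcing the (v)SIE to pick up new state: block the VCPUs, raise the synchronous request, and unblock. A condensed sketch of that pattern follows; it mirrors the kvm_s390_vcpu_crypto_reset_all() change in the diff below, and the wrapper name reset_crycb_for_all_vcpus() is purely illustrative.

/*
 * Sketch of the synchronous-request pattern. KVM_REQ_VSIE_RESTART itself is
 * a NOP when handled; its only job is to kick the VCPU out of the vSIE loop
 * (via exit_sie() and the simulated SIE entry added in the previous patch)
 * so the shadow crycb is rebuilt on the next vSIE intercept.
 */
static void reset_crycb_for_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}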
Signed-off-by: David Hildenbrand Signed-off-by: Tony Krowiak Reviewed-by: Pierre Morel Reviewed-by: Cornelia Huck Reviewed-by: Janosch Frank Reviewed-by: Christian Borntraeger Message-Id: <20180925231641.4954-3-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 1 + arch/s390/kvm/kvm-s390.c | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 29c940bf8506..75d39628f21d 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -44,6 +44,7 @@ #define KVM_REQ_ICPT_OPEREXC KVM_ARCH_REQ(2) #define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3) #define KVM_REQ_STOP_MIGRATION KVM_ARCH_REQ(4) +#define KVM_REQ_VSIE_RESTART KVM_ARCH_REQ(5) #define SIGP_CTRL_C 0x80 #define SIGP_CTRL_SCN_MASK 0x3f diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0b5aff0e3984..876fbb2b0c04 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -844,8 +844,11 @@ void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) kvm_s390_vcpu_block_all(kvm); - kvm_for_each_vcpu(i, vcpu, kvm) + kvm_for_each_vcpu(i, vcpu, kvm) { kvm_s390_vcpu_crypto_setup(vcpu); + /* recreate the shadow crycb by leaving the VSIE handler */ + kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu); + } kvm_s390_vcpu_unblock_all(kvm); } @@ -3203,6 +3206,8 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) /* nothing to do, just clear the request */ kvm_clear_request(KVM_REQ_UNHALT, vcpu); + /* we left the vsie handler, nothing to do, just clear the request */ + kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu); return 0; } From e585b24aeb445493b6a581d6c42fa58f663e93e0 Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:18 -0400 Subject: [PATCH 003/204] KVM: s390: refactor crypto initialization This patch refactors the code that initializes and sets up the crypto configuration for a guest. The following changes are implemented via this patch: 1. Introduces a flag indicating AP instructions executed on the guest shall be interpreted by the firmware. This flag is used to set a bit in the guest's state description indicating AP instructions are to be interpreted. 2. Replace code implementing AP interfaces with code supplied by the AP bus to query the AP configuration. 
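The second point boils down to replacing the open-coded PQAP(QCI) inline assembly with the AP bus helpers. A condensed sketch of the resulting query, mirroring kvm_s390_apxa_installed() in the diff below (ap_instructions_available() and ap_qci() are provided by the AP bus header asm/ap.h):

/*
 * Query the AP configuration through the AP bus instead of issuing
 * PQAP(QCI) directly; info.apxa reports the AP extended addressing facility.
 */
static int apxa_installed(void)
{
	struct ap_config_info info;

	if (!ap_instructions_available())
		return 0;
	if (ap_qci(&info) != 0)
		return 0;

	return info.apxa;
}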
Signed-off-by: Tony Krowiak Reviewed-by: Halil Pasic Acked-by: Christian Borntraeger Acked-by: Janosch Frank Reviewed-by: Cornelia Huck Tested-by: Michael Mueller Tested-by: Farhan Ali Message-Id: <20180925231641.4954-4-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 3 ++ arch/s390/kvm/kvm-s390.c | 70 +++++++++++++++----------------- 2 files changed, 36 insertions(+), 37 deletions(-) diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 75d39628f21d..79fa0a3a777f 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -187,6 +187,7 @@ struct kvm_s390_sie_block { #define ECA_AIV 0x00200000 #define ECA_VX 0x00020000 #define ECA_PROTEXCI 0x00002000 +#define ECA_APIE 0x00000008 #define ECA_SII 0x00000001 __u32 eca; /* 0x004c */ #define ICPT_INST 0x04 @@ -256,6 +257,7 @@ struct kvm_s390_sie_block { __u8 reservede4[4]; /* 0x00e4 */ __u64 tecmc; /* 0x00e8 */ __u8 reservedf0[12]; /* 0x00f0 */ +#define CRYCB_FORMAT_MASK 0x00000003 #define CRYCB_FORMAT1 0x00000001 #define CRYCB_FORMAT2 0x00000003 __u32 crycbd; /* 0x00fc */ @@ -716,6 +718,7 @@ struct kvm_s390_crypto { __u32 crycbd; __u8 aes_kw; __u8 dea_kw; + __u8 apie; }; #define APCB0_MASK_SIZE 1 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 876fbb2b0c04..d62f6d840f8c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -40,6 +40,7 @@ #include #include #include +#include #include "kvm-s390.h" #include "gaccess.h" @@ -1995,49 +1996,37 @@ long kvm_arch_vm_ioctl(struct file *filp, return r; } -static int kvm_s390_query_ap_config(u8 *config) -{ - u32 fcn_code = 0x04000000UL; - u32 cc = 0; - - memset(config, 0, 128); - asm volatile( - "lgr 0,%1\n" - "lgr 2,%2\n" - ".long 0xb2af0000\n" /* PQAP(QCI) */ - "0: ipm %0\n" - "srl %0,28\n" - "1:\n" - EX_TABLE(0b, 1b) - : "+r" (cc) - : "r" (fcn_code), "r" (config) - : "cc", "0", "2", "memory" - ); - - return cc; -} - static int kvm_s390_apxa_installed(void) { - u8 config[128]; - int cc; + struct ap_config_info info; - if (test_facility(12)) { - cc = kvm_s390_query_ap_config(config); - - if (cc) - pr_err("PQAP(QCI) failed with cc=%d", cc); - else - return config[0] & 0x40; + if (ap_instructions_available()) { + if (ap_qci(&info) == 0) + return info.apxa; } return 0; } +/* + * The format of the crypto control block (CRYCB) is specified in the 3 low + * order bits of the CRYCB designation (CRYCBD) field as follows: + * Format 0: Neither the message security assist extension 3 (MSAX3) nor the + * AP extended addressing (APXA) facility are installed. + * Format 1: The APXA facility is not installed but the MSAX3 facility is. 
+ * Format 2: Both the APXA and MSAX3 facilities are installed + */ static void kvm_s390_set_crycb_format(struct kvm *kvm) { kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; + /* Clear the CRYCB format bits - i.e., set format 0 by default */ + kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); + + /* Check whether MSAX3 is installed */ + if (!test_kvm_facility(kvm, 76)) + return; + if (kvm_s390_apxa_installed()) kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; else @@ -2055,12 +2044,12 @@ static u64 kvm_s390_get_initial_cpuid(void) static void kvm_s390_crypto_init(struct kvm *kvm) { - if (!test_kvm_facility(kvm, 76)) - return; - kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; kvm_s390_set_crycb_format(kvm); + if (!test_kvm_facility(kvm, 76)) + return; + /* Enable AES/DEA protected key functions by default */ kvm->arch.crypto.aes_kw = 1; kvm->arch.crypto.dea_kw = 1; @@ -2586,17 +2575,24 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) { - if (!test_kvm_facility(vcpu->kvm, 76)) + /* + * If the AP instructions are not being interpreted and the MSAX3 + * facility is not configured for the guest, there is nothing to set up. + */ + if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) return; + vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); + if (vcpu->kvm->arch.crypto.apie) + vcpu->arch.sie_block->eca |= ECA_APIE; + + /* Set up protected key support */ if (vcpu->kvm->arch.crypto.aes_kw) vcpu->arch.sie_block->ecb3 |= ECB3_AES; if (vcpu->kvm->arch.crypto.dea_kw) vcpu->arch.sie_block->ecb3 |= ECB3_DEA; - - vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; } void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu) From 1fde573413b549d52183382e639c1d6ce88f5959 Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:19 -0400 Subject: [PATCH 004/204] s390: vfio-ap: base implementation of VFIO AP device driver Introduces a new AP device driver. This device driver is built on the VFIO mediated device framework. The framework provides sysfs interfaces that facilitate passthrough access by guests to devices installed on the linux host. The VFIO AP device driver will serve two purposes: 1. Provide the interfaces to reserve AP devices for exclusive use by KVM guests. This is accomplished by unbinding the devices to be reserved for guest usage from the zcrypt device driver and binding them to the VFIO AP device driver. 2. Implements the functions, callbacks and sysfs attribute interfaces required to create one or more VFIO mediated devices each of which will be used to configure the AP matrix for a guest and serve as a file descriptor for facilitating communication between QEMU and the VFIO AP device driver. When the VFIO AP device driver is initialized: * It registers with the AP bus for control of type 10 (CEX4 and newer) AP queue devices. This limitation was imposed due to: 1. A desire to keep the code as simple as possible; 2. Some older models are no longer supported by the kernel and others are getting close to end of service. 3. A lack of older systems on which to test older devices. The probe and remove callbacks will be provided to support the binding/unbinding of AP queue devices to/from the VFIO AP device driver. 
* Creates a matrix device, /sys/devices/vfio_ap/matrix, to serve as the parent of the mediated devices created, one for each guest, and to hold the APQNs of the AP devices bound to the VFIO AP device driver. Signed-off-by: Tony Krowiak Reviewed-by: Halil Pasic Tested-by: Michael Mueller Tested-by: Farhan Ali Acked-by: David Hildenbrand Reviewed-by: Cornelia Huck Message-Id: <20180925231641.4954-5-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- MAINTAINERS | 10 ++ arch/s390/Kconfig | 11 ++ drivers/iommu/Kconfig | 8 ++ drivers/s390/crypto/Makefile | 4 + drivers/s390/crypto/vfio_ap_drv.c | 138 ++++++++++++++++++++++++++ drivers/s390/crypto/vfio_ap_private.h | 34 +++++++ 6 files changed, 205 insertions(+) create mode 100644 drivers/s390/crypto/vfio_ap_drv.c create mode 100644 drivers/s390/crypto/vfio_ap_private.h diff --git a/MAINTAINERS b/MAINTAINERS index 4ece30f15777..558f2abe7073 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12668,6 +12668,16 @@ W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported F: drivers/s390/crypto/ +S390 VFIO AP DRIVER +M: Tony Krowiak +M: Pierre Morel +M: Halil Pasic +L: linux-s390@vger.kernel.org +W: http://www.ibm.com/developerworks/linux/linux390/ +S: Supported +F: drivers/s390/crypto/vfio_ap_drv.c +F: drivers/s390/crypto/vfio_ap_private.h + S390 ZFCP DRIVER M: Steffen Maier M: Benjamin Block diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 9a9c7a6fe925..8cc8f25d9576 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -773,6 +773,17 @@ config VFIO_CCW To compile this driver as a module, choose M here: the module will be called vfio_ccw. +config VFIO_AP + def_tristate n + prompt "VFIO support for AP devices" + depends on S390_AP_IOMMU && VFIO_MDEV_DEVICE && KVM + help + This driver grants access to Adjunct Processor (AP) devices + via the VFIO mediated device interface. + + To compile this driver as a module, choose M here: the module + will be called vfio_ap. + endmenu menu "Dump support" diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index c60395b7470f..83e6d993fca5 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -372,6 +372,14 @@ config S390_CCW_IOMMU Enables bits of IOMMU API required by VFIO. The iommu_ops is not implemented as it is not necessary for VFIO. +config S390_AP_IOMMU + bool "S390 AP IOMMU Support" + depends on S390 && ZCRYPT + select IOMMU_API + help + Enables bits of IOMMU API required by VFIO. The iommu_ops + is not implemented as it is not necessary for VFIO. + config MTK_IOMMU bool "MTK IOMMU Support" depends on ARM || ARM64 diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile index b59af548ed1c..48e466eb19cf 100644 --- a/drivers/s390/crypto/Makefile +++ b/drivers/s390/crypto/Makefile @@ -15,3 +15,7 @@ obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o # pkey kernel module pkey-objs := pkey_api.o obj-$(CONFIG_PKEY) += pkey.o + +# adjunct processor matrix +vfio_ap-objs := vfio_ap_drv.o +obj-$(CONFIG_VFIO_AP) += vfio_ap.o diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c new file mode 100644 index 000000000000..ea2ae03c896e --- /dev/null +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * VFIO based AP device driver + * + * Copyright IBM Corp. 
2018 + * + * Author(s): Tony Krowiak + */ + +#include +#include +#include +#include +#include "vfio_ap_private.h" + +#define VFIO_AP_ROOT_NAME "vfio_ap" +#define VFIO_AP_DEV_TYPE_NAME "ap_matrix" +#define VFIO_AP_DEV_NAME "matrix" + +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018"); +MODULE_LICENSE("GPL v2"); + +static struct ap_driver vfio_ap_drv; + +static struct device_type vfio_ap_dev_type = { + .name = VFIO_AP_DEV_TYPE_NAME, +}; + +struct ap_matrix_dev *matrix_dev; + +/* Only type 10 adapters (CEX4 and later) are supported + * by the AP matrix device driver + */ +static struct ap_device_id ap_queue_ids[] = { + { .dev_type = AP_DEVICE_TYPE_CEX4, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX5, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX6, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { /* end of sibling */ }, +}; + +MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids); + +static int vfio_ap_queue_dev_probe(struct ap_device *apdev) +{ + return 0; +} + +static void vfio_ap_queue_dev_remove(struct ap_device *apdev) +{ + /* Nothing to do yet */ +} + +static void vfio_ap_matrix_dev_release(struct device *dev) +{ + struct ap_matrix_dev *matrix_dev = dev_get_drvdata(dev); + + kfree(matrix_dev); +} + +static int vfio_ap_matrix_dev_create(void) +{ + int ret; + struct device *root_device; + + root_device = root_device_register(VFIO_AP_ROOT_NAME); + if (IS_ERR(root_device)) + return PTR_ERR(root_device); + + matrix_dev = kzalloc(sizeof(*matrix_dev), GFP_KERNEL); + if (!matrix_dev) { + ret = -ENOMEM; + goto matrix_alloc_err; + } + + matrix_dev->device.type = &vfio_ap_dev_type; + dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME); + matrix_dev->device.parent = root_device; + matrix_dev->device.release = vfio_ap_matrix_dev_release; + matrix_dev->device.driver = &vfio_ap_drv.driver; + + ret = device_register(&matrix_dev->device); + if (ret) + goto matrix_reg_err; + + return 0; + +matrix_reg_err: + put_device(&matrix_dev->device); +matrix_alloc_err: + root_device_unregister(root_device); + + return ret; +} + +static void vfio_ap_matrix_dev_destroy(void) +{ + device_unregister(&matrix_dev->device); + root_device_unregister(matrix_dev->device.parent); +} + +int __init vfio_ap_init(void) +{ + int ret; + + /* If there are no AP instructions, there is nothing to pass through. */ + if (!ap_instructions_available()) + return -ENODEV; + + ret = vfio_ap_matrix_dev_create(); + if (ret) + return ret; + + memset(&vfio_ap_drv, 0, sizeof(vfio_ap_drv)); + vfio_ap_drv.probe = vfio_ap_queue_dev_probe; + vfio_ap_drv.remove = vfio_ap_queue_dev_remove; + vfio_ap_drv.ids = ap_queue_ids; + + ret = ap_driver_register(&vfio_ap_drv, THIS_MODULE, VFIO_AP_DRV_NAME); + if (ret) { + vfio_ap_matrix_dev_destroy(); + return ret; + } + + return 0; +} + +void __exit vfio_ap_exit(void) +{ + ap_driver_unregister(&vfio_ap_drv); + vfio_ap_matrix_dev_destroy(); +} + +module_init(vfio_ap_init); +module_exit(vfio_ap_exit); diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h new file mode 100644 index 000000000000..6141420c8bb0 --- /dev/null +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Private data and functions for adjunct processor VFIO matrix driver. + * + * Author(s): Tony Krowiak + * + * Copyright IBM Corp. 
2018 + */ + +#ifndef _VFIO_AP_PRIVATE_H_ +#define _VFIO_AP_PRIVATE_H_ + +#include +#include +#include +#include +#include + +#include "ap_bus.h" + +#define VFIO_AP_MODULE_NAME "vfio_ap" +#define VFIO_AP_DRV_NAME "vfio_ap" + +/** + * ap_matrix_dev - the AP matrix device structure + * @device: generic device structure associated with the AP matrix device + */ +struct ap_matrix_dev { + struct device device; +}; + +extern struct ap_matrix_dev *matrix_dev; + +#endif /* _VFIO_AP_PRIVATE_H_ */ From 65f06713d3fa0e4125f59ad5b9d6239109b1d7fc Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:20 -0400 Subject: [PATCH 005/204] s390: vfio-ap: register matrix device with VFIO mdev framework Registers the matrix device created by the VFIO AP device driver with the VFIO mediated device framework. Registering the matrix device will create the sysfs structures needed to create mediated matrix devices each of which will be used to configure the AP matrix for a guest and connect it to the VFIO AP device driver. Registering the matrix device with the VFIO mediated device framework will create the following sysfs structures: /sys/devices/vfio_ap/matrix/ ...... [mdev_supported_types] ......... [vfio_ap-passthrough] ............ create To create a mediated device for the AP matrix device, write a UUID to the create file: uuidgen > create A symbolic link to the mediated device's directory will be created in the devices subdirectory named after the generated $uuid: /sys/devices/vfio_ap/matrix/ ...... [mdev_supported_types] ......... [vfio_ap-passthrough] ............ [devices] ............... [$uuid] A symbolic link to the mediated device will also be created in the vfio_ap matrix's directory: /sys/devices/vfio_ap/matrix/[$uuid] Signed-off-by: Tony Krowiak Reviewed-by: Halil Pasic Reviewed-by: Cornelia Huck Tested-by: Michael Mueller Tested-by: Farhan Ali Message-Id: <20180925231641.4954-6-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- MAINTAINERS | 1 + drivers/s390/crypto/Makefile | 2 +- drivers/s390/crypto/vfio_ap_drv.c | 19 ++++ drivers/s390/crypto/vfio_ap_ops.c | 126 ++++++++++++++++++++++++++ drivers/s390/crypto/vfio_ap_private.h | 49 ++++++++++ include/uapi/linux/vfio.h | 1 + 6 files changed, 197 insertions(+), 1 deletion(-) create mode 100644 drivers/s390/crypto/vfio_ap_ops.c diff --git a/MAINTAINERS b/MAINTAINERS index 558f2abe7073..9cd3997445be 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12677,6 +12677,7 @@ W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported F: drivers/s390/crypto/vfio_ap_drv.c F: drivers/s390/crypto/vfio_ap_private.h +F: drivers/s390/crypto/vfio_ap_ops.c S390 ZFCP DRIVER M: Steffen Maier diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile index 48e466eb19cf..8d36b05a7575 100644 --- a/drivers/s390/crypto/Makefile +++ b/drivers/s390/crypto/Makefile @@ -17,5 +17,5 @@ pkey-objs := pkey_api.o obj-$(CONFIG_PKEY) += pkey.o # adjunct processor matrix -vfio_ap-objs := vfio_ap_drv.o +vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o obj-$(CONFIG_VFIO_AP) += vfio_ap.o diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index ea2ae03c896e..8b51821d9bf7 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -76,6 +76,16 @@ static int vfio_ap_matrix_dev_create(void) goto matrix_alloc_err; } + /* Fill in config info via PQAP(QCI), if available */ + if (test_facility(12)) { + ret = ap_qci(&matrix_dev->info); + if (ret) + goto matrix_alloc_err; + } + + 
mutex_init(&matrix_dev->lock); + INIT_LIST_HEAD(&matrix_dev->mdev_list); + matrix_dev->device.type = &vfio_ap_dev_type; dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME); matrix_dev->device.parent = root_device; @@ -125,11 +135,20 @@ int __init vfio_ap_init(void) return ret; } + ret = vfio_ap_mdev_register(); + if (ret) { + ap_driver_unregister(&vfio_ap_drv); + vfio_ap_matrix_dev_destroy(); + + return ret; + } + return 0; } void __exit vfio_ap_exit(void) { + vfio_ap_mdev_unregister(); ap_driver_unregister(&vfio_ap_drv); vfio_ap_matrix_dev_destroy(); } diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c new file mode 100644 index 000000000000..99ed30315f56 --- /dev/null +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Adjunct processor matrix VFIO device driver callbacks. + * + * Copyright IBM Corp. 2018 + * + * Author(s): Tony Krowiak + * Halil Pasic + * Pierre Morel + */ +#include +#include +#include +#include +#include +#include + +#include "vfio_ap_private.h" + +#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough" +#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device" + +static void vfio_ap_matrix_init(struct ap_config_info *info, + struct ap_matrix *matrix) +{ + matrix->apm_max = info->apxa ? info->Na : 63; + matrix->aqm_max = info->apxa ? info->Nd : 15; + matrix->adm_max = info->apxa ? info->Nd : 15; +} + +static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev) +{ + struct ap_matrix_mdev *matrix_mdev; + + if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0)) + return -EPERM; + + matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL); + if (!matrix_mdev) { + atomic_inc(&matrix_dev->available_instances); + return -ENOMEM; + } + + vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix); + mdev_set_drvdata(mdev, matrix_mdev); + mutex_lock(&matrix_dev->lock); + list_add(&matrix_mdev->node, &matrix_dev->mdev_list); + mutex_unlock(&matrix_dev->lock); + + return 0; +} + +static int vfio_ap_mdev_remove(struct mdev_device *mdev) +{ + struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + + mutex_lock(&matrix_dev->lock); + list_del(&matrix_mdev->node); + mutex_unlock(&matrix_dev->lock); + + kfree(matrix_mdev); + mdev_set_drvdata(mdev, NULL); + atomic_inc(&matrix_dev->available_instances); + + return 0; +} + +static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf) +{ + return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT); +} + +MDEV_TYPE_ATTR_RO(name); + +static ssize_t available_instances_show(struct kobject *kobj, + struct device *dev, char *buf) +{ + return sprintf(buf, "%d\n", + atomic_read(&matrix_dev->available_instances)); +} + +MDEV_TYPE_ATTR_RO(available_instances); + +static ssize_t device_api_show(struct kobject *kobj, struct device *dev, + char *buf) +{ + return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING); +} + +MDEV_TYPE_ATTR_RO(device_api); + +static struct attribute *vfio_ap_mdev_type_attrs[] = { + &mdev_type_attr_name.attr, + &mdev_type_attr_device_api.attr, + &mdev_type_attr_available_instances.attr, + NULL, +}; + +static struct attribute_group vfio_ap_mdev_hwvirt_type_group = { + .name = VFIO_AP_MDEV_TYPE_HWVIRT, + .attrs = vfio_ap_mdev_type_attrs, +}; + +static struct attribute_group *vfio_ap_mdev_type_groups[] = { + &vfio_ap_mdev_hwvirt_type_group, + NULL, +}; + +static const struct mdev_parent_ops vfio_ap_matrix_ops = { + .owner = THIS_MODULE, + .supported_type_groups = vfio_ap_mdev_type_groups, + .create 
= vfio_ap_mdev_create, + .remove = vfio_ap_mdev_remove, +}; + +int vfio_ap_mdev_register(void) +{ + atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT); + + return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops); +} + +void vfio_ap_mdev_unregister(void) +{ + mdev_unregister_device(&matrix_dev->device); +} diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index 6141420c8bb0..9f197ffab7ad 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -3,6 +3,7 @@ * Private data and functions for adjunct processor VFIO matrix driver. * * Author(s): Tony Krowiak + * Halil Pasic * * Copyright IBM Corp. 2018 */ @@ -24,11 +25,59 @@ /** * ap_matrix_dev - the AP matrix device structure * @device: generic device structure associated with the AP matrix device + * @available_instances: number of mediated matrix devices that can be created + * @info: the struct containing the output from the PQAP(QCI) instruction + * mdev_list: the list of mediated matrix devices created + * lock: mutex for locking the AP matrix device. This lock will be + * taken every time we fiddle with state managed by the vfio_ap + * driver, be it using @mdev_list or writing the state of a + * single ap_matrix_mdev device. It's quite coarse but we don't + * expect much contention. */ struct ap_matrix_dev { struct device device; + atomic_t available_instances; + struct ap_config_info info; + struct list_head mdev_list; + struct mutex lock; }; extern struct ap_matrix_dev *matrix_dev; +/** + * The AP matrix is comprised of three bit masks identifying the adapters, + * queues (domains) and control domains that belong to an AP matrix. The bits i + * each mask, from least significant to most significant bit, correspond to IDs + * 0 to 255. When a bit is set, the corresponding ID belongs to the matrix. + * + * @apm_max: max adapter number in @apm + * @apm identifies the AP adapters in the matrix + * @aqm_max: max domain number in @aqm + * @aqm identifies the AP queues (domains) in the matrix + * @adm_max: max domain number in @adm + * @adm identifies the AP control domains in the matrix + */ +struct ap_matrix { + unsigned long apm_max; + DECLARE_BITMAP(apm, 256); + unsigned long aqm_max; + DECLARE_BITMAP(aqm, 256); + unsigned long adm_max; + DECLARE_BITMAP(adm, 256); +}; + +/** + * struct ap_matrix_mdev - the mediated matrix device structure + * @list: allows the ap_matrix_mdev struct to be added to a list + * @matrix: the adapters, usage domains and control domains assigned to the + * mediated matrix device. 
+ */ +struct ap_matrix_mdev { + struct list_head node; + struct ap_matrix matrix; +}; + +extern int vfio_ap_mdev_register(void); +extern void vfio_ap_mdev_unregister(void); + #endif /* _VFIO_AP_PRIVATE_H_ */ diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 1aa7b82e8169..bfbe2be8f369 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -215,6 +215,7 @@ struct vfio_device_info { #define VFIO_DEVICE_API_PLATFORM_STRING "vfio-platform" #define VFIO_DEVICE_API_AMBA_STRING "vfio-amba" #define VFIO_DEVICE_API_CCW_STRING "vfio-ccw" +#define VFIO_DEVICE_API_AP_STRING "vfio-ap" /** * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8, From 96d152bdc987ad76da49be350dc9f8ab1647e801 Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:21 -0400 Subject: [PATCH 006/204] s390: vfio-ap: sysfs interfaces to configure adapters Introduces two new sysfs attributes for the VFIO mediated matrix device for assigning AP adapters to and unassigning AP adapters from a mediated matrix device. The IDs of the AP adapters assigned to the mediated matrix device will be stored in an AP mask (APM). The bits in the APM, from most significant to least significant bit, correspond to AP adapter IDs (APID) 0 to 255. On some systems, the maximum allowable adapter number may be less than 255 - depending upon the host's AP configuration - and assignment may be rejected if the input adapter ID exceeds the limit. When an adapter is assigned, the bit corresponding to the APID will be set in the APM. Likewise, when an adapter is unassigned, the bit corresponding to the APID will be cleared from the APM. In order to successfully assign an adapter, the APQNs derived from the adapter ID being assigned and the queue indexes of all domains previously assigned: 1. Must be bound to the vfio_ap device driver. 2. Must not be assigned to any other mediated matrix device If there are no domains assigned to the mdev, then there must be an AP queue bound to the vfio_ap device driver with an APQN containing the APID, otherwise all domains subsequently assigned will fail because there will be no AP queues bound with an APQN containing the adapter ID. Assigning or un-assigning an AP adapter will be rejected if a guest using the mediated matrix device is running. The relevant sysfs structures are: /sys/devices/vfio_ap/matrix/ ...... [mdev_supported_types] ......... [vfio_ap-passthrough] ............ [devices] ...............[$uuid] .................. assign_adapter .................. unassign_adapter To assign an adapter to the $uuid mediated matrix device's APM, write the APID to the assign_adapter file. To unassign an adapter, write the APID to the unassign_adapter file. The APID is specified using conventional semantics: If it begins with 0x the number will be parsed as a hexadecimal number; if it begins with a 0 the number will be parsed as an octal number; otherwise, it will be parsed as a decimal number. 
For example, to assign adapter 173 (0xad) to the mediated matrix device $uuid: echo 173 > assign_adapter or echo 0xad > assign_adapter or echo 0255 > assign_adapter To unassign adapter 173 (0xad): echo 173 > unassign_adapter or echo 0xad > unassign_adapter or echo 0255 > unassign_adapter Signed-off-by: Tony Krowiak Reviewed-by: Halil Pasic Tested-by: Michael Mueller Tested-by: Farhan Ali Tested-by: Pierre Morel Reviewed-by: Cornelia Huck Message-Id: <20180925231641.4954-7-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- drivers/s390/crypto/vfio_ap_ops.c | 292 ++++++++++++++++++++++++++++++ 1 file changed, 292 insertions(+) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 99ed30315f56..8a63473c5ae7 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -106,9 +106,301 @@ static struct attribute_group *vfio_ap_mdev_type_groups[] = { NULL, }; +struct vfio_ap_queue_reserved { + unsigned long *apid; + unsigned long *apqi; + bool reserved; +}; + +/** + * vfio_ap_has_queue + * + * @dev: an AP queue device + * @data: a struct vfio_ap_queue_reserved reference + * + * Flags whether the AP queue device (@dev) has a queue ID containing the APQN, + * apid or apqi specified in @data: + * + * - If @data contains both an apid and apqi value, then @data will be flagged + * as reserved if the APID and APQI fields for the AP queue device matches + * + * - If @data contains only an apid value, @data will be flagged as + * reserved if the APID field in the AP queue device matches + * + * - If @data contains only an apqi value, @data will be flagged as + * reserved if the APQI field in the AP queue device matches + * + * Returns 0 to indicate the input to function succeeded. Returns -EINVAL if + * @data does not contain either an apid or apqi. + */ +static int vfio_ap_has_queue(struct device *dev, void *data) +{ + struct vfio_ap_queue_reserved *qres = data; + struct ap_queue *ap_queue = to_ap_queue(dev); + ap_qid_t qid; + unsigned long id; + + if (qres->apid && qres->apqi) { + qid = AP_MKQID(*qres->apid, *qres->apqi); + if (qid == ap_queue->qid) + qres->reserved = true; + } else if (qres->apid && !qres->apqi) { + id = AP_QID_CARD(ap_queue->qid); + if (id == *qres->apid) + qres->reserved = true; + } else if (!qres->apid && qres->apqi) { + id = AP_QID_QUEUE(ap_queue->qid); + if (id == *qres->apqi) + qres->reserved = true; + } else { + return -EINVAL; + } + + return 0; +} + +/** + * vfio_ap_verify_queue_reserved + * + * @matrix_dev: a mediated matrix device + * @apid: an AP adapter ID + * @apqi: an AP queue index + * + * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device + * driver according to the following rules: + * + * - If both @apid and @apqi are not NULL, then there must be an AP queue + * device bound to the vfio_ap driver with the APQN identified by @apid and + * @apqi + * + * - If only @apid is not NULL, then there must be an AP queue device bound + * to the vfio_ap driver with an APQN containing @apid + * + * - If only @apqi is not NULL, then there must be an AP queue device bound + * to the vfio_ap driver with an APQN containing @apqi + * + * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL. 
+ */ +static int vfio_ap_verify_queue_reserved(unsigned long *apid, + unsigned long *apqi) +{ + int ret; + struct vfio_ap_queue_reserved qres; + + qres.apid = apid; + qres.apqi = apqi; + qres.reserved = false; + + ret = driver_for_each_device(matrix_dev->device.driver, NULL, &qres, + vfio_ap_has_queue); + if (ret) + return ret; + + if (qres.reserved) + return 0; + + return -EADDRNOTAVAIL; +} + +static int +vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev, + unsigned long apid) +{ + int ret; + unsigned long apqi; + unsigned long nbits = matrix_mdev->matrix.aqm_max + 1; + + if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits) + return vfio_ap_verify_queue_reserved(&apid, NULL); + + for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) { + ret = vfio_ap_verify_queue_reserved(&apid, &apqi); + if (ret) + return ret; + } + + return 0; +} + +/** + * vfio_ap_mdev_verify_no_sharing + * + * Verifies that the APQNs derived from the cross product of the AP adapter IDs + * and AP queue indexes comprising the AP matrix are not configured for another + * mediated device. AP queue sharing is not allowed. + * + * @matrix_mdev: the mediated matrix device + * + * Returns 0 if the APQNs are not shared, otherwise; returns -EADDRINUSE. + */ +static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev) +{ + struct ap_matrix_mdev *lstdev; + DECLARE_BITMAP(apm, AP_DEVICES); + DECLARE_BITMAP(aqm, AP_DOMAINS); + + list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) { + if (matrix_mdev == lstdev) + continue; + + memset(apm, 0, sizeof(apm)); + memset(aqm, 0, sizeof(aqm)); + + /* + * We work on full longs, as we can only exclude the leftover + * bits in non-inverse order. The leftover is all zeros. + */ + if (!bitmap_and(apm, matrix_mdev->matrix.apm, + lstdev->matrix.apm, AP_DEVICES)) + continue; + + if (!bitmap_and(aqm, matrix_mdev->matrix.aqm, + lstdev->matrix.aqm, AP_DOMAINS)) + continue; + + return -EADDRINUSE; + } + + return 0; +} + +/** + * assign_adapter_store + * + * @dev: the matrix device + * @attr: the mediated matrix device's assign_adapter attribute + * @buf: a buffer containing the AP adapter number (APID) to + * be assigned + * @count: the number of bytes in @buf + * + * Parses the APID from @buf and sets the corresponding bit in the mediated + * matrix device's APM. + * + * Returns the number of bytes processed if the APID is valid; otherwise, + * returns one of the following errors: + * + * 1. -EINVAL + * The APID is not a valid number + * + * 2. -ENODEV + * The APID exceeds the maximum value configured for the system + * + * 3. -EADDRNOTAVAIL + * An APQN derived from the cross product of the APID being assigned + * and the APQIs previously assigned is not bound to the vfio_ap device + * driver; or, if no APQIs have yet been assigned, the APID is not + * contained in an APQN bound to the vfio_ap device driver. + * + * 4. 
-EADDRINUSE + * An APQN derived from the cross product of the APID being assigned + * and the APQIs previously assigned is being used by another mediated + * matrix device + */ +static ssize_t assign_adapter_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long apid; + struct mdev_device *mdev = mdev_from_dev(dev); + struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + + ret = kstrtoul(buf, 0, &apid); + if (ret) + return ret; + + if (apid > matrix_mdev->matrix.apm_max) + return -ENODEV; + + /* + * Set the bit in the AP mask (APM) corresponding to the AP adapter + * number (APID). The bits in the mask, from most significant to least + * significant bit, correspond to APIDs 0-255. + */ + mutex_lock(&matrix_dev->lock); + + ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid); + if (ret) + goto done; + + set_bit_inv(apid, matrix_mdev->matrix.apm); + + ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev); + if (ret) + goto share_err; + + ret = count; + goto done; + +share_err: + clear_bit_inv(apid, matrix_mdev->matrix.apm); +done: + mutex_unlock(&matrix_dev->lock); + + return ret; +} +static DEVICE_ATTR_WO(assign_adapter); + +/** + * unassign_adapter_store + * + * @dev: the matrix device + * @attr: the mediated matrix device's unassign_adapter attribute + * @buf: a buffer containing the adapter number (APID) to be unassigned + * @count: the number of bytes in @buf + * + * Parses the APID from @buf and clears the corresponding bit in the mediated + * matrix device's APM. + * + * Returns the number of bytes processed if the APID is valid; otherwise, + * returns one of the following errors: + * -EINVAL if the APID is not a number + * -ENODEV if the APID it exceeds the maximum value configured for the + * system + */ +static ssize_t unassign_adapter_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long apid; + struct mdev_device *mdev = mdev_from_dev(dev); + struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + + ret = kstrtoul(buf, 0, &apid); + if (ret) + return ret; + + if (apid > matrix_mdev->matrix.apm_max) + return -ENODEV; + + mutex_lock(&matrix_dev->lock); + clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm); + mutex_unlock(&matrix_dev->lock); + + return count; +} +DEVICE_ATTR_WO(unassign_adapter); + +static struct attribute *vfio_ap_mdev_attrs[] = { + &dev_attr_assign_adapter.attr, + &dev_attr_unassign_adapter.attr, + NULL +}; + +static struct attribute_group vfio_ap_mdev_attr_group = { + .attrs = vfio_ap_mdev_attrs +}; + +static const struct attribute_group *vfio_ap_mdev_attr_groups[] = { + &vfio_ap_mdev_attr_group, + NULL +}; + static const struct mdev_parent_ops vfio_ap_matrix_ops = { .owner = THIS_MODULE, .supported_type_groups = vfio_ap_mdev_type_groups, + .mdev_attr_groups = vfio_ap_mdev_attr_groups, .create = vfio_ap_mdev_create, .remove = vfio_ap_mdev_remove, }; From 3211da0c0b549f18d7b049abcffc73ca55ae51fd Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:22 -0400 Subject: [PATCH 007/204] s390: vfio-ap: sysfs interfaces to configure domains Introduces two new sysfs attributes for the VFIO mediated matrix device for assigning AP domains to and unassigning AP domains from a mediated matrix device. The IDs of the AP domains assigned to the mediated matrix device will be stored in an AP queue mask (AQM). 
The bits in the AQM, from most significant to least significant bit, correspond to AP queue index (APQI) 0 to 255 (note that an APQI is synonymous with with a domain ID). On some systems, the maximum allowable domain number may be less than 255 - depending upon the host's AP configuration - and assignment may be rejected if the input domain ID exceeds the limit. When a domain is assigned, the bit corresponding to the APQI will be set in the AQM. Likewise, when a domain is unassigned, the bit corresponding to the APQI will be cleared from the AQM. In order to successfully assign a domain, the APQNs derived from the domain ID being assigned and the adapter numbers of all adapters previously assigned: 1. Must be bound to the vfio_ap device driver. 2. Must not be assigned to any other mediated matrix device. If there are no adapters assigned to the mdev, then there must be an AP queue bound to the vfio_ap device driver with an APQN containing the domain ID (i.e., APQI), otherwise all adapters subsequently assigned will fail because there will be no AP queues bound with an APQN containing the APQI. Assigning or un-assigning an AP domain will also be rejected if a guest using the mediated matrix device is running. The relevant sysfs structures are: /sys/devices/vfio_ap/matrix/ ...... [mdev_supported_types] ......... [vfio_ap-passthrough] ............ [devices] ...............[$uuid] .................. assign_domain .................. unassign_domain To assign a domain to the $uuid mediated matrix device, write the domain's ID to the assign_domain file. To unassign a domain, write the domain's ID to the unassign_domain file. The ID is specified using conventional semantics: If it begins with 0x, the number will be parsed as a hexadecimal (case insensitive) number; if it begins with 0, it will be parsed as an octal number; otherwise, it will be parsed as a decimal number. 
For example, to assign domain 173 (0xad) to the mediated matrix device $uuid: echo 173 > assign_domain or echo 0255 > assign_domain or echo 0xad > assign_domain To unassign domain 173 (0xad): echo 173 > unassign_domain or echo 0255 > unassign_domain or echo 0xad > unassign_domain Signed-off-by: Tony Krowiak Reviewed-by: Halil Pasic Reviewed-by: Cornelia Huck Tested-by: Michael Mueller Tested-by: Farhan Ali Tested-by: Pierre Morel Message-Id: <20180925231641.4954-8-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- drivers/s390/crypto/vfio_ap_ops.c | 138 +++++++++++++++++++++++++++++- 1 file changed, 137 insertions(+), 1 deletion(-) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 8a63473c5ae7..e9a2ad4ddfd6 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -382,10 +382,146 @@ static ssize_t unassign_adapter_store(struct device *dev, } DEVICE_ATTR_WO(unassign_adapter); +static int +vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev, + unsigned long apqi) +{ + int ret; + unsigned long apid; + unsigned long nbits = matrix_mdev->matrix.apm_max + 1; + + if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits) + return vfio_ap_verify_queue_reserved(NULL, &apqi); + + for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) { + ret = vfio_ap_verify_queue_reserved(&apid, &apqi); + if (ret) + return ret; + } + + return 0; +} + +/** + * assign_domain_store + * + * @dev: the matrix device + * @attr: the mediated matrix device's assign_domain attribute + * @buf: a buffer containing the AP queue index (APQI) of the domain to + * be assigned + * @count: the number of bytes in @buf + * + * Parses the APQI from @buf and sets the corresponding bit in the mediated + * matrix device's AQM. + * + * Returns the number of bytes processed if the APQI is valid; otherwise returns + * one of the following errors: + * + * 1. -EINVAL + * The APQI is not a valid number + * + * 2. -ENODEV + * The APQI exceeds the maximum value configured for the system + * + * 3. -EADDRNOTAVAIL + * An APQN derived from the cross product of the APQI being assigned + * and the APIDs previously assigned is not bound to the vfio_ap device + * driver; or, if no APIDs have yet been assigned, the APQI is not + * contained in an APQN bound to the vfio_ap device driver. + * + * 4. 
-EADDRINUSE + * An APQN derived from the cross product of the APQI being assigned + * and the APIDs previously assigned is being used by another mediated + * matrix device + */ +static ssize_t assign_domain_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long apqi; + struct mdev_device *mdev = mdev_from_dev(dev); + struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + unsigned long max_apqi = matrix_mdev->matrix.aqm_max; + + ret = kstrtoul(buf, 0, &apqi); + if (ret) + return ret; + if (apqi > max_apqi) + return -ENODEV; + + mutex_lock(&matrix_dev->lock); + + ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi); + if (ret) + goto done; + + set_bit_inv(apqi, matrix_mdev->matrix.aqm); + + ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev); + if (ret) + goto share_err; + + ret = count; + goto done; + +share_err: + clear_bit_inv(apqi, matrix_mdev->matrix.aqm); +done: + mutex_unlock(&matrix_dev->lock); + + return ret; +} +DEVICE_ATTR_WO(assign_domain); + + +/** + * unassign_domain_store + * + * @dev: the matrix device + * @attr: the mediated matrix device's unassign_domain attribute + * @buf: a buffer containing the AP queue index (APQI) of the domain to + * be unassigned + * @count: the number of bytes in @buf + * + * Parses the APQI from @buf and clears the corresponding bit in the + * mediated matrix device's AQM. + * + * Returns the number of bytes processed if the APQI is valid; otherwise, + * returns one of the following errors: + * -EINVAL if the APQI is not a number + * -ENODEV if the APQI exceeds the maximum value configured for the system + */ +static ssize_t unassign_domain_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long apqi; + struct mdev_device *mdev = mdev_from_dev(dev); + struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + + ret = kstrtoul(buf, 0, &apqi); + if (ret) + return ret; + + if (apqi > matrix_mdev->matrix.aqm_max) + return -ENODEV; + + mutex_lock(&matrix_dev->lock); + clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm); + mutex_unlock(&matrix_dev->lock); + + return count; +} +DEVICE_ATTR_WO(unassign_domain); + static struct attribute *vfio_ap_mdev_attrs[] = { &dev_attr_assign_adapter.attr, &dev_attr_unassign_adapter.attr, - NULL + &dev_attr_assign_domain.attr, + &dev_attr_unassign_domain.attr, + NULL, }; static struct attribute_group vfio_ap_mdev_attr_group = { From 3b1eab7fb9da69c9af129058ed5aebb93d40d623 Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:23 -0400 Subject: [PATCH 008/204] s390: vfio-ap: sysfs interfaces to configure control domains Provides the sysfs interfaces for: 1. Assigning AP control domains to the mediated matrix device 2. Unassigning AP control domains from a mediated matrix device 3. Displaying the control domains assigned to a mediated matrix device The IDs of the AP control domains assigned to the mediated matrix device are stored in an AP domain mask (ADM). The bits in the ADM, from most significant to least significant bit, correspond to AP domain numbers 0 to 255. On some systems, the maximum allowable domain number may be less than 255 - depending upon the host's AP configuration - and assignment may be rejected if the input domain ID exceeds the limit. When a control domain is assigned, the bit corresponding its domain ID will be set in the ADM. 
Likewise, when a domain is unassigned, the bit corresponding to its domain ID will be cleared in the ADM. The relevant sysfs structures are: /sys/devices/vfio_ap/matrix/ ...... [mdev_supported_types] ......... [vfio_ap-passthrough] ............ [devices] ...............[$uuid] .................. assign_control_domain .................. unassign_control_domain To assign a control domain to the $uuid mediated matrix device's ADM, write its domain number to the assign_control_domain file. To unassign a domain, write its domain number to the unassign_control_domain file. The domain number is specified using conventional semantics: If it begins with 0x the number will be parsed as a hexadecimal (case insensitive) number; if it begins with 0, it is parsed as an octal number; otherwise, it will be parsed as a decimal number. For example, to assign control domain 173 (0xad) to the mediated matrix device $uuid: echo 173 > assign_control_domain or echo 0255 > assign_control_domain or echo 0xad > assign_control_domain To unassign control domain 173 (0xad): echo 173 > unassign_control_domain or echo 0255 > unassign_control_domain or echo 0xad > unassign_control_domain The assignment will be rejected if the APQI exceeds the maximum value for an AP domain: * If the AP Extended Addressing (APXA) facility is installed, the max value is 255 * Else the max value is 15 Signed-off-by: Tony Krowiak Reviewed-by: Halil Pasic Reviewed-by: Cornelia Huck Tested-by: Michael Mueller Tested-by: Farhan Ali Tested-by: Pierre Morel Message-Id: <20180925231641.4954-9-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- drivers/s390/crypto/vfio_ap_ops.c | 112 ++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index e9a2ad4ddfd6..a6c08cd2ed0f 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -516,11 +516,123 @@ static ssize_t unassign_domain_store(struct device *dev, } DEVICE_ATTR_WO(unassign_domain); +/** + * assign_control_domain_store + * + * @dev: the matrix device + * @attr: the mediated matrix device's assign_control_domain attribute + * @buf: a buffer containing the domain ID to be assigned + * @count: the number of bytes in @buf + * + * Parses the domain ID from @buf and sets the corresponding bit in the mediated + * matrix device's ADM. + * + * Returns the number of bytes processed if the domain ID is valid; otherwise, + * returns one of the following errors: + * -EINVAL if the ID is not a number + * -ENODEV if the ID exceeds the maximum value configured for the system + */ +static ssize_t assign_control_domain_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long id; + struct mdev_device *mdev = mdev_from_dev(dev); + struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + + ret = kstrtoul(buf, 0, &id); + if (ret) + return ret; + + if (id > matrix_mdev->matrix.adm_max) + return -ENODEV; + + /* Set the bit in the ADM (bitmask) corresponding to the AP control + * domain number (id). The bits in the mask, from most significant to + * least significant, correspond to IDs 0 up to the one less than the + * number of control domains that can be assigned. 
+ */ + mutex_lock(&matrix_dev->lock); + set_bit_inv(id, matrix_mdev->matrix.adm); + mutex_unlock(&matrix_dev->lock); + + return count; +} +DEVICE_ATTR_WO(assign_control_domain); + +/** + * unassign_control_domain_store + * + * @dev: the matrix device + * @attr: the mediated matrix device's unassign_control_domain attribute + * @buf: a buffer containing the domain ID to be unassigned + * @count: the number of bytes in @buf + * + * Parses the domain ID from @buf and clears the corresponding bit in the + * mediated matrix device's ADM. + * + * Returns the number of bytes processed if the domain ID is valid; otherwise, + * returns one of the following errors: + * -EINVAL if the ID is not a number + * -ENODEV if the ID exceeds the maximum value configured for the system + */ +static ssize_t unassign_control_domain_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long domid; + struct mdev_device *mdev = mdev_from_dev(dev); + struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + unsigned long max_domid = matrix_mdev->matrix.adm_max; + + ret = kstrtoul(buf, 0, &domid); + if (ret) + return ret; + if (domid > max_domid) + return -ENODEV; + + mutex_lock(&matrix_dev->lock); + clear_bit_inv(domid, matrix_mdev->matrix.adm); + mutex_unlock(&matrix_dev->lock); + + return count; +} +DEVICE_ATTR_WO(unassign_control_domain); + +static ssize_t control_domains_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + unsigned long id; + int nchars = 0; + int n; + char *bufpos = buf; + struct mdev_device *mdev = mdev_from_dev(dev); + struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + unsigned long max_domid = matrix_mdev->matrix.adm_max; + + mutex_lock(&matrix_dev->lock); + for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) { + n = sprintf(bufpos, "%04lx\n", id); + bufpos += n; + nchars += n; + } + mutex_unlock(&matrix_dev->lock); + + return nchars; +} +DEVICE_ATTR_RO(control_domains); + static struct attribute *vfio_ap_mdev_attrs[] = { &dev_attr_assign_adapter.attr, &dev_attr_unassign_adapter.attr, &dev_attr_assign_domain.attr, &dev_attr_unassign_domain.attr, + &dev_attr_assign_control_domain.attr, + &dev_attr_unassign_control_domain.attr, + &dev_attr_control_domains.attr, NULL, }; From 81b2b4b76a733ee479af359db59e1c91e3aad4cd Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:24 -0400 Subject: [PATCH 009/204] s390: vfio-ap: sysfs interface to view matrix mdev matrix Provides a sysfs interface to view the AP matrix configured for the mediated matrix device. The relevant sysfs structures are: /sys/devices/vfio_ap/matrix/ ...... [mdev_supported_types] ......... [vfio_ap-passthrough] ............ [devices] ...............[$uuid] .................. matrix To view the matrix configured for the mediated matrix device, print the matrix file: cat matrix Below are examples of the output from the above command: Example 1: Adapters and domains assigned Assignments: Adapters 5 and 6 Domains 4 and 71 (0x47) Output 05.0004 05.0047 06.0004 06.0047 Examples 2: Only adapters assigned Assignments: Adapters 5 and 6 Output: 05. 06. 
Examples 3: Only domains assigned Assignments: Domains 4 and 71 (0x47) Output: .0004 .0047 Signed-off-by: Tony Krowiak Reviewed-by: Halil Pasic Reviewed-by: Cornelia Huck Tested-by: Michael Mueller Tested-by: Farhan Ali Tested-by: Pierre Morel Message-Id: <20180925231641.4954-10-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- drivers/s390/crypto/vfio_ap_ops.c | 51 +++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index a6c08cd2ed0f..3834624dcc2e 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -625,6 +625,56 @@ static ssize_t control_domains_show(struct device *dev, } DEVICE_ATTR_RO(control_domains); +static ssize_t matrix_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mdev_device *mdev = mdev_from_dev(dev); + struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + char *bufpos = buf; + unsigned long apid; + unsigned long apqi; + unsigned long apid1; + unsigned long apqi1; + unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1; + unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1; + int nchars = 0; + int n; + + apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits); + apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits); + + mutex_lock(&matrix_dev->lock); + + if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) { + for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) { + for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, + naqm_bits) { + n = sprintf(bufpos, "%02lx.%04lx\n", apid, + apqi); + bufpos += n; + nchars += n; + } + } + } else if (apid1 < napm_bits) { + for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) { + n = sprintf(bufpos, "%02lx.\n", apid); + bufpos += n; + nchars += n; + } + } else if (apqi1 < naqm_bits) { + for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) { + n = sprintf(bufpos, ".%04lx\n", apqi); + bufpos += n; + nchars += n; + } + } + + mutex_unlock(&matrix_dev->lock); + + return nchars; +} +DEVICE_ATTR_RO(matrix); + static struct attribute *vfio_ap_mdev_attrs[] = { &dev_attr_assign_adapter.attr, &dev_attr_unassign_adapter.attr, @@ -633,6 +683,7 @@ static struct attribute *vfio_ap_mdev_attrs[] = { &dev_attr_assign_control_domain.attr, &dev_attr_unassign_control_domain.attr, &dev_attr_control_domains.attr, + &dev_attr_matrix.attr, NULL, }; From 42104598ef2e8c3ce532ebec891c9edec161e508 Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:25 -0400 Subject: [PATCH 010/204] KVM: s390: interface to clear CRYCB masks Introduces a new KVM function to clear the APCB0 and APCB1 in the guest's CRYCB. This effectively clears all bits of the APM, AQM and ADM masks configured for the guest. The VCPUs are taken out of SIE to ensure the VCPUs do not get out of sync. 
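For illustration only, a minimal usage sketch (the example_revoke_guest_ap_access()
helper is hypothetical; the real caller is added by the vfio-ap patches later in
this series):

    /*
     * Hedged usage sketch, not part of this patch: a driver holding a
     * struct kvm reference can revoke all AP adapters, usage domains and
     * control domains of the guest in one call. VCPU blocking is handled
     * inside the helper, so the caller needs no extra serialization
     * against running VCPUs.
     */
    static void example_revoke_guest_ap_access(struct kvm *kvm)
    {
    	if (kvm)
    		kvm_arch_crypto_clear_masks(kvm);
    }
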
Signed-off-by: Tony Krowiak Acked-by: Halil Pasic Tested-by: Michael Mueller Tested-by: Farhan Ali Tested-by: Pierre Morel Reviewed-by: Cornelia Huck Message-Id: <20180925231641.4954-11-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 2 ++ arch/s390/kvm/kvm-s390.c | 15 +++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 79fa0a3a777f..0a78036a802b 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -859,6 +859,8 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work); +void kvm_arch_crypto_clear_masks(struct kvm *kvm); + extern int sie64a(struct kvm_s390_sie_block *, u64 *); extern char sie_exit; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d62f6d840f8c..75aa5aa4a926 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2033,6 +2033,21 @@ static void kvm_s390_set_crycb_format(struct kvm *kvm) kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; } +void kvm_arch_crypto_clear_masks(struct kvm *kvm) +{ + mutex_lock(&kvm->lock); + kvm_s390_vcpu_block_all(kvm); + + memset(&kvm->arch.crypto.crycb->apcb0, 0, + sizeof(kvm->arch.crypto.crycb->apcb0)); + memset(&kvm->arch.crypto.crycb->apcb1, 0, + sizeof(kvm->arch.crypto.crycb->apcb1)); + + kvm_s390_vcpu_unblock_all(kvm); + mutex_unlock(&kvm->lock); +} +EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks); + static u64 kvm_s390_get_initial_cpuid(void) { struct cpuid cpuid; From 258287c994de8f2f52430b8d79b4fdf2c95f8c91 Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:26 -0400 Subject: [PATCH 011/204] s390: vfio-ap: implement mediated device open callback Implements the open callback on the mediated matrix device. The function registers a group notifier to receive notification of the VFIO_GROUP_NOTIFY_SET_KVM event. When notified, the vfio_ap device driver will get access to the guest's kvm structure. The open callback must ensure that only one mediated device shall be opened per guest. 
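For context, a hedged userspace sketch of what triggers the
VFIO_GROUP_NOTIFY_SET_KVM notification: it is delivered once userspace
associates the VFIO group of the mediated device with a KVM instance through
the KVM VFIO pseudo device. The link_vfio_group_to_kvm() helper below is
hypothetical and only illustrates the sequence:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Hedged sketch, not part of this patch: create the KVM VFIO pseudo
     * device on the VM fd and add the group fd of the mediated matrix
     * device to it; this is the point at which the group notifier in the
     * vfio_ap driver receives the guest's kvm pointer.
     */
    static int link_vfio_group_to_kvm(int kvm_vm_fd, int group_fd)
    {
    	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
    	struct kvm_device_attr attr = {
    		.group = KVM_DEV_VFIO_GROUP,
    		.attr  = KVM_DEV_VFIO_GROUP_ADD,
    		.addr  = (__u64)(unsigned long)&group_fd,
    	};

    	if (ioctl(kvm_vm_fd, KVM_CREATE_DEVICE, &cd))
    		return -1;
    	return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
    }
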
Signed-off-by: Tony Krowiak Acked-by: Halil Pasic Tested-by: Michael Mueller Tested-by: Farhan Ali Tested-by: Pierre Morel Acked-by: Pierre Morel Reviewed-by: Cornelia Huck Message-Id: <20180925231641.4954-12-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 1 + drivers/s390/crypto/vfio_ap_ops.c | 161 ++++++++++++++++++++++++++ drivers/s390/crypto/vfio_ap_private.h | 5 + 3 files changed, 167 insertions(+) diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 0a78036a802b..36d35313e840 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -258,6 +258,7 @@ struct kvm_s390_sie_block { __u64 tecmc; /* 0x00e8 */ __u8 reservedf0[12]; /* 0x00f0 */ #define CRYCB_FORMAT_MASK 0x00000003 +#define CRYCB_FORMAT0 0x00000000 #define CRYCB_FORMAT1 0x00000001 #define CRYCB_FORMAT2 0x00000003 __u32 crycbd; /* 0x00fc */ diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 3834624dcc2e..1fd0beefeda6 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -13,6 +13,10 @@ #include #include #include +#include +#include +#include +#include #include #include "vfio_ap_private.h" @@ -54,6 +58,9 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev) { struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + if (matrix_mdev->kvm) + return -EBUSY; + mutex_lock(&matrix_dev->lock); list_del(&matrix_mdev->node); mutex_unlock(&matrix_dev->lock); @@ -305,6 +312,10 @@ static ssize_t assign_adapter_store(struct device *dev, struct mdev_device *mdev = mdev_from_dev(dev); struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + /* If the guest is running, disallow assignment of adapter */ + if (matrix_mdev->kvm) + return -EBUSY; + ret = kstrtoul(buf, 0, &apid); if (ret) return ret; @@ -367,6 +378,10 @@ static ssize_t unassign_adapter_store(struct device *dev, struct mdev_device *mdev = mdev_from_dev(dev); struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + /* If the guest is running, disallow un-assignment of adapter */ + if (matrix_mdev->kvm) + return -EBUSY; + ret = kstrtoul(buf, 0, &apid); if (ret) return ret; @@ -444,6 +459,10 @@ static ssize_t assign_domain_store(struct device *dev, struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); unsigned long max_apqi = matrix_mdev->matrix.aqm_max; + /* If the guest is running, disallow assignment of domain */ + if (matrix_mdev->kvm) + return -EBUSY; + ret = kstrtoul(buf, 0, &apqi); if (ret) return ret; @@ -501,6 +520,10 @@ static ssize_t unassign_domain_store(struct device *dev, struct mdev_device *mdev = mdev_from_dev(dev); struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + /* If the guest is running, disallow un-assignment of domain */ + if (matrix_mdev->kvm) + return -EBUSY; + ret = kstrtoul(buf, 0, &apqi); if (ret) return ret; @@ -541,6 +564,10 @@ static ssize_t assign_control_domain_store(struct device *dev, struct mdev_device *mdev = mdev_from_dev(dev); struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + /* If the guest is running, disallow assignment of control domain */ + if (matrix_mdev->kvm) + return -EBUSY; + ret = kstrtoul(buf, 0, &id); if (ret) return ret; @@ -587,6 +614,10 @@ static ssize_t unassign_control_domain_store(struct device *dev, struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); unsigned long max_domid = matrix_mdev->matrix.adm_max; + /* If the guest is running, disallow un-assignment of control domain 
*/ + if (matrix_mdev->kvm) + return -EBUSY; + ret = kstrtoul(buf, 0, &domid); if (ret) return ret; @@ -696,12 +727,142 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = { NULL }; +static void vfio_ap_mdev_copy_masks(struct ap_matrix_mdev *matrix_mdev) +{ + int nbytes; + unsigned long *apm, *aqm, *adm; + struct kvm_s390_crypto_cb *crycb = matrix_mdev->kvm->arch.crypto.crycb; + + switch (matrix_mdev->kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { + case CRYCB_FORMAT2: + apm = (unsigned long *)crycb->apcb1.apm; + aqm = (unsigned long *)crycb->apcb1.aqm; + adm = (unsigned long *)crycb->apcb1.adm; + break; + case CRYCB_FORMAT1: + case CRYCB_FORMAT0: + apm = (unsigned long *)crycb->apcb0.apm; + aqm = (unsigned long *)crycb->apcb0.aqm; + adm = (unsigned long *)crycb->apcb0.adm; + break; + default: + /* cannot happen */ + return; + } + + nbytes = DIV_ROUND_UP(matrix_mdev->matrix.apm_max + 1, BITS_PER_BYTE); + memcpy(apm, matrix_mdev->matrix.apm, nbytes); + nbytes = DIV_ROUND_UP(matrix_mdev->matrix.aqm_max + 1, BITS_PER_BYTE); + memcpy(aqm, matrix_mdev->matrix.aqm, nbytes); + nbytes = DIV_ROUND_UP(matrix_mdev->matrix.adm_max + 1, BITS_PER_BYTE); + memcpy(adm, matrix_mdev->matrix.adm, nbytes); +} + +/** + * vfio_ap_mdev_set_kvm + * + * @matrix_mdev: a mediated matrix device + * @kvm: reference to KVM instance + * + * Verifies no other mediated matrix device has @kvm and sets a reference to + * it in @matrix_mdev->kvm. + * + * Return 0 if no other mediated matrix device has a reference to @kvm; + * otherwise, returns an -EPERM. + */ +static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev, + struct kvm *kvm) +{ + struct ap_matrix_mdev *m; + + mutex_lock(&matrix_dev->lock); + + list_for_each_entry(m, &matrix_dev->mdev_list, node) { + if ((m != matrix_mdev) && (m->kvm == kvm)) { + mutex_unlock(&matrix_dev->lock); + return -EPERM; + } + } + + matrix_mdev->kvm = kvm; + mutex_unlock(&matrix_dev->lock); + + return 0; +} + +static int vfio_ap_mdev_group_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + int ret; + struct ap_matrix_mdev *matrix_mdev; + + if (action != VFIO_GROUP_NOTIFY_SET_KVM) + return NOTIFY_OK; + + matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier); + + if (!data) { + matrix_mdev->kvm = NULL; + return NOTIFY_OK; + } + + ret = vfio_ap_mdev_set_kvm(matrix_mdev, data); + if (ret) + return NOTIFY_DONE; + + /* If there is no CRYCB pointer, then we can't copy the masks */ + if (!matrix_mdev->kvm->arch.crypto.crycbd) + return NOTIFY_DONE; + + vfio_ap_mdev_copy_masks(matrix_mdev); + + return NOTIFY_OK; +} + +static int vfio_ap_mdev_open(struct mdev_device *mdev) +{ + struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + unsigned long events; + int ret; + + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier; + events = VFIO_GROUP_NOTIFY_SET_KVM; + + ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, + &events, &matrix_mdev->group_notifier); + if (ret) { + module_put(THIS_MODULE); + return ret; + } + + return 0; +} + +static void vfio_ap_mdev_release(struct mdev_device *mdev) +{ + struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + + if (matrix_mdev->kvm) + kvm_arch_crypto_clear_masks(matrix_mdev->kvm); + + vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, + &matrix_mdev->group_notifier); + matrix_mdev->kvm = NULL; + module_put(THIS_MODULE); +} + static const struct mdev_parent_ops vfio_ap_matrix_ops = { 
.owner = THIS_MODULE, .supported_type_groups = vfio_ap_mdev_type_groups, .mdev_attr_groups = vfio_ap_mdev_attr_groups, .create = vfio_ap_mdev_create, .remove = vfio_ap_mdev_remove, + .open = vfio_ap_mdev_open, + .release = vfio_ap_mdev_release, }; int vfio_ap_mdev_register(void) diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index 9f197ffab7ad..5675492233c7 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -71,10 +71,15 @@ struct ap_matrix { * @list: allows the ap_matrix_mdev struct to be added to a list * @matrix: the adapters, usage domains and control domains assigned to the * mediated matrix device. + * @group_notifier: notifier block used for specifying callback function for + * handling the VFIO_GROUP_NOTIFY_SET_KVM event + * @kvm: the struct holding guest's state */ struct ap_matrix_mdev { struct list_head node; struct ap_matrix matrix; + struct notifier_block group_notifier; + struct kvm *kvm; }; extern int vfio_ap_mdev_register(void); From e06670c5fe3b3a55547e2caeaec34acfdb4885e3 Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:27 -0400 Subject: [PATCH 012/204] s390: vfio-ap: implement VFIO_DEVICE_GET_INFO ioctl Adds support for the VFIO_DEVICE_GET_INFO ioctl to the VFIO AP Matrix device driver. This is a minimal implementation, as vfio-ap does not use I/O regions. Signed-off-by: Tony Krowiak Reviewed-by: Pierre Morel Reviewed-by: Cornelia Huck Acked-by: Halil Pasic Tested-by: Michael Mueller Tested-by: Farhan Ali Tested-by: Pierre Morel Message-Id: <20180925231641.4954-13-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- drivers/s390/crypto/vfio_ap_ops.c | 38 +++++++++++++++++++++++++++++++ include/uapi/linux/vfio.h | 1 + 2 files changed, 39 insertions(+) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 1fd0beefeda6..974cf06d8a5c 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -855,6 +855,43 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev) module_put(THIS_MODULE); } +static int vfio_ap_mdev_get_device_info(unsigned long arg) +{ + unsigned long minsz; + struct vfio_device_info info; + + minsz = offsetofend(struct vfio_device_info, num_irqs); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + info.flags = VFIO_DEVICE_FLAGS_AP; + info.num_regions = 0; + info.num_irqs = 0; + + return copy_to_user((void __user *)arg, &info, minsz); +} + +static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev, + unsigned int cmd, unsigned long arg) +{ + int ret; + + switch (cmd) { + case VFIO_DEVICE_GET_INFO: + ret = vfio_ap_mdev_get_device_info(arg); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + static const struct mdev_parent_ops vfio_ap_matrix_ops = { .owner = THIS_MODULE, .supported_type_groups = vfio_ap_mdev_type_groups, @@ -863,6 +900,7 @@ static const struct mdev_parent_ops vfio_ap_matrix_ops = { .remove = vfio_ap_mdev_remove, .open = vfio_ap_mdev_open, .release = vfio_ap_mdev_release, + .ioctl = vfio_ap_mdev_ioctl, }; int vfio_ap_mdev_register(void) diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index bfbe2be8f369..f378b9802d8b 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -200,6 +200,7 @@ struct vfio_device_info { #define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */ #define VFIO_DEVICE_FLAGS_AMBA 
(1 << 3) /* vfio-amba device */ #define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */ +#define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */ __u32 num_regions; /* Max region index + 1 */ __u32 num_irqs; /* Max IRQ index + 1 */ }; From 46a7263d4746a2659edafcb885e91e58bb6d3a2e Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:28 -0400 Subject: [PATCH 013/204] s390: vfio-ap: zeroize the AP queues Let's call PAPQ(ZAPQ) to zeroize a queue for each queue configured for a mediated matrix device when it is released. Zeroizing a queue resets the queue, clears all pending messages for the queue entries and disables adapter interruptions associated with the queue. Signed-off-by: Tony Krowiak Reviewed-by: Halil Pasic Tested-by: Michael Mueller Tested-by: Farhan Ali Reviewed-by: Cornelia Huck Message-Id: <20180925231641.4954-14-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- drivers/s390/crypto/vfio_ap_ops.c | 49 +++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 974cf06d8a5c..619900c56cd6 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -819,6 +819,54 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb, return NOTIFY_OK; } +static int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi, + unsigned int retry) +{ + struct ap_queue_status status; + + do { + status = ap_zapq(AP_MKQID(apid, apqi)); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + return 0; + case AP_RESPONSE_RESET_IN_PROGRESS: + case AP_RESPONSE_BUSY: + msleep(20); + break; + default: + /* things are really broken, give up */ + return -EIO; + } + } while (retry--); + + return -EBUSY; +} + +static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) +{ + int ret; + int rc = 0; + unsigned long apid, apqi; + struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + + for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, + matrix_mdev->matrix.apm_max + 1) { + for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, + matrix_mdev->matrix.aqm_max + 1) { + ret = vfio_ap_mdev_reset_queue(apid, apqi, 1); + /* + * Regardless whether a queue turns out to be busy, or + * is not operational, we need to continue resetting + * the remaining queues. + */ + if (ret) + rc = ret; + } + } + + return rc; +} + static int vfio_ap_mdev_open(struct mdev_device *mdev) { struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); @@ -849,6 +897,7 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev) if (matrix_mdev->kvm) kvm_arch_crypto_clear_masks(matrix_mdev->kvm); + vfio_ap_mdev_reset_queues(mdev); vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &matrix_mdev->group_notifier); matrix_mdev->kvm = NULL; From cd8a377e3b40002cd7983b53f384456393908b7e Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:29 -0400 Subject: [PATCH 014/204] s390: vfio-ap: implement VFIO_DEVICE_RESET ioctl Implements the VFIO_DEVICE_RESET ioctl. This ioctl zeroizes all of the AP queues assigned to the guest. 
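For illustration, a hedged userspace sketch (the reset_ap_mdev() helper is
hypothetical): once VFIO_DEVICE_FLAGS_RESET is advertised via
VFIO_DEVICE_GET_INFO, the reset is requested with an argument-less ioctl on
the VFIO device file descriptor:

    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /*
     * Hedged sketch, not part of this patch: ask the driver to reset
     * (zeroize) all AP queues assigned to the mediated matrix device.
     * VFIO_DEVICE_RESET takes no argument; 0 is returned on success.
     */
    static int reset_ap_mdev(int device_fd)
    {
    	return ioctl(device_fd, VFIO_DEVICE_RESET);
    }
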
Signed-off-by: Tony Krowiak Reviewed-by: Halil Pasic Reviewed-by: Pierre Morel Reviewed-by: Cornelia Huck Tested-by: Michael Mueller Tested-by: Farhan Ali Tested-by: Pierre Morel Message-Id: <20180925231641.4954-15-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- drivers/s390/crypto/vfio_ap_ops.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 619900c56cd6..d3d9eb72b0f1 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -917,7 +917,7 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg) if (info.argsz < minsz) return -EINVAL; - info.flags = VFIO_DEVICE_FLAGS_AP; + info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET; info.num_regions = 0; info.num_irqs = 0; @@ -933,6 +933,9 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev, case VFIO_DEVICE_GET_INFO: ret = vfio_ap_mdev_get_device_info(arg); break; + case VFIO_DEVICE_RESET: + ret = vfio_ap_mdev_reset_queues(mdev); + break; default: ret = -EOPNOTSUPP; break; From 6cc571b1b1e8b6fbcf69411d115cf9d9be866276 Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Tue, 25 Sep 2018 19:16:30 -0400 Subject: [PATCH 015/204] KVM: s390: Clear Crypto Control Block when using vSIE When we clear the Crypto Control Block (CRYCB) used by a guest level 2, the vSIE shadow CRYCB for guest level 3 must be updated before the guest uses it. We achieve this by using the KVM_REQ_VSIE_RESTART synchronous request for each vCPU belonging to the guest to force the reload of the shadow CRYCB before rerunning the guest level 3. Signed-off-by: Pierre Morel Signed-off-by: Tony Krowiak Message-Id: <20180925231641.4954-16-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 75aa5aa4a926..c94ef2d0dbe4 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2043,6 +2043,8 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm) memset(&kvm->arch.crypto.crycb->apcb1, 0, sizeof(kvm->arch.crypto.crycb->apcb1)); + /* recreate the shadow crycb for each vcpu */ + kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); kvm_s390_vcpu_unblock_all(kvm); mutex_unlock(&kvm->lock); } From d6f6959ac5877885fbba8b0efe09e6103fa1da80 Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Tue, 25 Sep 2018 19:16:31 -0400 Subject: [PATCH 016/204] KVM: s390: vsie: Do the CRYCB validation first We need to handle the validity checks for the crycb, no matter what the settings for the keywrappings are. So lets move the keywrapping checks after we have done the validy checks. 
Signed-off-by: Pierre Morel Signed-off-by: Tony Krowiak Reviewed-by: Janosch Frank Reviewed-by: David Hildenbrand Message-Id: <20180925231641.4954-17-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/kvm/vsie.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 12b970701c26..38ea5da4e642 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -161,17 +161,18 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) /* format-1 is supported with message-security-assist extension 3 */ if (!test_kvm_facility(vcpu->kvm, 76)) return 0; - /* we may only allow it if enabled for guest 2 */ - ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 & - (ECB3_AES | ECB3_DEA); - if (!ecb3_flags) - return 0; if ((crycb_addr & PAGE_MASK) != ((crycb_addr + 128) & PAGE_MASK)) return set_validity_icpt(scb_s, 0x003CU); else if (!crycb_addr) return set_validity_icpt(scb_s, 0x0039U); + /* we may only allow it if enabled for guest 2 */ + ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 & + (ECB3_AES | ECB3_DEA); + if (!ecb3_flags) + return 0; + /* copy only the wrapping keys */ if (read_guest_real(vcpu, crycb_addr + 72, vsie_page->crycb.dea_wrapping_key_mask, 56)) From 3af84def9cbf38f2a13c0a8e1a74721ad762747e Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Tue, 25 Sep 2018 19:16:32 -0400 Subject: [PATCH 017/204] KVM: s390: vsie: Make use of CRYCB FORMAT2 clear The comment preceding the shadow_crycb function is misleading, we effectively accept FORMAT2 CRYCB in the guest. When using FORMAT2 in the host we do not need to or with FORMAT1. Signed-off-by: Pierre Morel Signed-off-by: Tony Krowiak Reviewed-by: Janosch Frank Reviewed-by: David Hildenbrand Message-Id: <20180925231641.4954-18-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/kvm/vsie.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 38ea5da4e642..e0e6fbfa88f4 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -140,7 +140,8 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) * Create a shadow copy of the crycb block and setup key wrapping, if * requested for guest 3 and enabled for guest 2. * - * We only accept format-1 (no AP in g2), but convert it into format-2 + * We accept format-1 or format-2, but we treat it as a format-1 (no AP in g2), + * and we convert it into format-2 in the shadow CRYCB. * There is nothing to do for format-0. * * Returns: - 0 if shadowed or nothing to do @@ -179,8 +180,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) return set_validity_icpt(scb_s, 0x0035U); scb_s->ecb3 |= ecb3_flags; - scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT1 | - CRYCB_FORMAT2; + scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2; /* xor both blocks in one run */ b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask; From 56019f9aca2266edc7f304d91e4e772023b7aa14 Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Tue, 25 Sep 2018 19:16:33 -0400 Subject: [PATCH 018/204] KVM: s390: vsie: Allow CRYCB FORMAT-2 When the guest and the host both use CRYCB FORMAT-2, we copy the guest's FORMAT-1 APCB to a FORMAT-1 shadow APCB. This patch also cleans up the shadow_crycb() function. 
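For illustration, a hedged sketch of the masking rule applied when shadowing
(the shadow_apm_word() helper is hypothetical, not part of the patch): the
shadow APCB is the bitwise AND of what guest 2 requests for guest 3 and what
the host granted to guest 2.

    /*
     * Hedged illustration, not part of this patch: for any APCB mask word,
     * guest 3 can never see more than the intersection of the guest 2
     * request and the host grant, e.g. for one APM word (MSB = AP 0):
     *
     *   guest 2 APM: 0xc000000000000000   (adapters 0 and 1)
     *   host APM   : 0x8000000000000000   (adapter 0 only)
     *   shadow APM : 0x8000000000000000   (adapter 0 only)
     */
    static inline unsigned long shadow_apm_word(unsigned long g2_apm,
    					    unsigned long host_apm)
    {
    	return g2_apm & host_apm;
    }
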
Signed-off-by: Pierre Morel Signed-off-by: Tony Krowiak Message-Id: <20180925231641.4954-19-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/kvm/vsie.c | 114 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 104 insertions(+), 10 deletions(-) diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index e0e6fbfa88f4..3a932781e0b0 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -136,14 +136,81 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) return 0; } -/* +/** + * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB + * @vcpu: pointer to the virtual CPU + * @apcb_s: pointer to start of apcb in the shadow crycb + * @apcb_o: pointer to start of original guest apcb + * @apcb_h: pointer to start of apcb in the host + * + * Returns 0 and -EFAULT on error reading guest apcb + */ +static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s, + unsigned long apcb_o, + unsigned long *apcb_h) +{ + if (read_guest_real(vcpu, apcb_o, apcb_s, + sizeof(struct kvm_s390_apcb1))) + return -EFAULT; + + bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1)); + + return 0; +} + +/** + * setup_apcb - Create a shadow copy of the apcb. + * @vcpu: pointer to the virtual CPU + * @crycb_s: pointer to shadow crycb + * @crycb_o: pointer to original guest crycb + * @crycb_h: pointer to the host crycb + * @fmt_o: format of the original guest crycb. + * @fmt_h: format of the host crycb. + * + * Checks the compatibility between the guest and host crycb and calls the + * appropriate copy function. + * + * Return 0 or an error number if the guest and host crycb are incompatible. + */ +static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s, + const u32 crycb_o, + struct kvm_s390_crypto_cb *crycb_h, + int fmt_o, int fmt_h) +{ + struct kvm_s390_crypto_cb *crycb; + + crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o; + + switch (fmt_o) { + case CRYCB_FORMAT2: + if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK)) + return -EACCES; + if (fmt_h != CRYCB_FORMAT2) + return -EINVAL; + return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1, + (unsigned long) &crycb->apcb1, + (unsigned long *)&crycb_h->apcb1); + } + return -EINVAL; +} + +/** + * shadow_crycb - Create a shadow copy of the crycb block + * @vcpu: a pointer to the virtual CPU + * @vsie_page: a pointer to internal date used for the vSIE + * * Create a shadow copy of the crycb block and setup key wrapping, if * requested for guest 3 and enabled for guest 2. * - * We accept format-1 or format-2, but we treat it as a format-1 (no AP in g2), - * and we convert it into format-2 in the shadow CRYCB. + * We accept format-1 or format-2, but we convert format-1 into format-2 + * in the shadow CRYCB. + * Using format-2 enables the firmware to choose the right format when + * scheduling the SIE. * There is nothing to do for format-0. * + * This function centralize the issuing of set_validity_icpt() for all + * the subfunctions working on the crycb. 
+ * * Returns: - 0 if shadowed or nothing to do * - > 0 if control has to be given to guest 2 */ @@ -155,24 +222,42 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) const u32 crycb_addr = crycbd_o & 0x7ffffff8U; unsigned long *b1, *b2; u8 ecb3_flags; + int apie_h; + int key_msk = test_kvm_facility(vcpu->kvm, 76); + int fmt_o = crycbd_o & CRYCB_FORMAT_MASK; + int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK; + int ret = 0; scb_s->crycbd = 0; if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1)) return 0; - /* format-1 is supported with message-security-assist extension 3 */ - if (!test_kvm_facility(vcpu->kvm, 76)) + + apie_h = vcpu->arch.sie_block->eca & ECA_APIE; + if (!apie_h && !key_msk) return 0; - if ((crycb_addr & PAGE_MASK) != ((crycb_addr + 128) & PAGE_MASK)) - return set_validity_icpt(scb_s, 0x003CU); - else if (!crycb_addr) + if (!crycb_addr) return set_validity_icpt(scb_s, 0x0039U); + if (fmt_o == CRYCB_FORMAT1) + if ((crycb_addr & PAGE_MASK) != + ((crycb_addr + 128) & PAGE_MASK)) + return set_validity_icpt(scb_s, 0x003CU); + + if (apie_h && (scb_o->eca & ECA_APIE)) { + ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr, + vcpu->kvm->arch.crypto.crycb, + fmt_o, fmt_h); + if (ret) + goto end; + scb_s->eca |= scb_o->eca & ECA_APIE; + } + /* we may only allow it if enabled for guest 2 */ ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 & (ECB3_AES | ECB3_DEA); if (!ecb3_flags) - return 0; + goto end; /* copy only the wrapping keys */ if (read_guest_real(vcpu, crycb_addr + 72, @@ -180,7 +265,6 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) return set_validity_icpt(scb_s, 0x0035U); scb_s->ecb3 |= ecb3_flags; - scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2; /* xor both blocks in one run */ b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask; @@ -188,6 +272,16 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask; /* as 56%8 == 0, bitmap_xor won't overwrite any data */ bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56); +end: + switch (ret) { + case -EINVAL: + return set_validity_icpt(scb_s, 0x0020U); + case -EFAULT: + return set_validity_icpt(scb_s, 0x0035U); + case -EACCES: + return set_validity_icpt(scb_s, 0x003CU); + } + scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2; return 0; } From 19fd83a64718f8e0168b954c3ec0f353cb306c7f Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Tue, 25 Sep 2018 19:16:34 -0400 Subject: [PATCH 019/204] KVM: s390: vsie: allow CRYCB FORMAT-1 When the host and guest both use a FORMAT-1 CRYCB, we copy the guest's FORMAT-0 APCB to a shadow CRYCB for use by vSIE. 
Signed-off-by: Pierre Morel Signed-off-by: Tony Krowiak Message-Id: <20180925231641.4954-20-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/kvm/vsie.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 3a932781e0b0..0a26814c6efe 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -136,6 +136,19 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) return 0; } +/* Copy to APCB FORMAT0 from APCB FORMAT0 */ +static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s, + unsigned long apcb_o, unsigned long *apcb_h) +{ + if (read_guest_real(vcpu, apcb_o, apcb_s, + sizeof(struct kvm_s390_apcb0))) + return -EFAULT; + + bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0)); + + return 0; +} + /** * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB * @vcpu: pointer to the virtual CPU @@ -190,6 +203,12 @@ static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s, return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1, (unsigned long) &crycb->apcb1, (unsigned long *)&crycb_h->apcb1); + case CRYCB_FORMAT1: + if (fmt_h != CRYCB_FORMAT1) + return -EINVAL; + return setup_apcb00(vcpu, (unsigned long *) &crycb_s->apcb0, + (unsigned long) &crycb->apcb0, + (unsigned long *) &crycb_h->apcb0); } return -EINVAL; } From 6ee74098201b717696388cd9754d10a109346d6b Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Tue, 25 Sep 2018 19:16:35 -0400 Subject: [PATCH 020/204] KVM: s390: vsie: allow CRYCB FORMAT-0 When the host and the guest both use a FORMAT-0 CRYCB, we copy the guest's FORMAT-0 APCB to a shadow CRYCB for use by vSIE. Signed-off-by: Pierre Morel Signed-off-by: Tony Krowiak Message-Id: <20180925231641.4954-21-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/kvm/vsie.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 0a26814c6efe..67a5536637bb 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -136,7 +136,15 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) return 0; } -/* Copy to APCB FORMAT0 from APCB FORMAT0 */ +/** + * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0 + * @vcpu: pointer to the virtual CPU + * @apcb_s: pointer to start of apcb in the shadow crycb + * @apcb_o: pointer to start of original apcb in the guest2 + * @apcb_h: pointer to start of apcb in the guest1 + * + * Returns 0 and -EFAULT on error reading guest apcb + */ static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s, unsigned long apcb_o, unsigned long *apcb_h) { @@ -209,6 +217,14 @@ static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s, return setup_apcb00(vcpu, (unsigned long *) &crycb_s->apcb0, (unsigned long) &crycb->apcb0, (unsigned long *) &crycb_h->apcb0); + case CRYCB_FORMAT0: + if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK)) + return -EACCES; + if (fmt_h != CRYCB_FORMAT0) + return -EINVAL; + return setup_apcb00(vcpu, (unsigned long *) &crycb_s->apcb0, + (unsigned long) &crycb->apcb0, + (unsigned long *) &crycb_h->apcb0); } return -EINVAL; } @@ -248,8 +264,6 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) int ret = 0; scb_s->crycbd = 0; - if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1)) - return 0; apie_h = vcpu->arch.sie_block->eca & ECA_APIE; if 
(!apie_h && !key_msk) From c9ba8c2cd210d35cdb48683db7c94f11c236d841 Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Tue, 25 Sep 2018 19:16:36 -0400 Subject: [PATCH 021/204] KVM: s390: vsie: allow guest FORMAT-0 CRYCB on host FORMAT-1 When the guest schedules a SIE with a FORMAT-0 CRYCB, we are able to schedule it in the host with a FORMAT-1 CRYCB if the host uses FORMAT-1 or FORMAT-0. Signed-off-by: Pierre Morel Signed-off-by: Tony Krowiak Message-Id: <20180925231641.4954-22-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/kvm/vsie.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 67a5536637bb..90ae31072991 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -220,11 +220,17 @@ static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s, case CRYCB_FORMAT0: if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK)) return -EACCES; - if (fmt_h != CRYCB_FORMAT0) + + switch (fmt_h) { + case CRYCB_FORMAT2: return -EINVAL; - return setup_apcb00(vcpu, (unsigned long *) &crycb_s->apcb0, - (unsigned long) &crycb->apcb0, - (unsigned long *) &crycb_h->apcb0); + case CRYCB_FORMAT1: + case CRYCB_FORMAT0: + return setup_apcb00(vcpu, + (unsigned long *) &crycb_s->apcb0, + (unsigned long) &crycb->apcb0, + (unsigned long *) &crycb_h->apcb0); + } } return -EINVAL; } From 6b79de4b056e5a2febc0c61233d8f0ad7868e49c Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Tue, 25 Sep 2018 19:16:37 -0400 Subject: [PATCH 022/204] KVM: s390: vsie: allow guest FORMAT-1 CRYCB on host FORMAT-2 When the guest schedules a SIE with a CRYCB FORMAT-1 CRYCB, we are able to schedule it in the host with a FORMAT-2 CRYCB if the host uses FORMAT-2. 
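For clarity, a hedged worked example of the domain-mask truncation performed by
setup_apcb10() below, assuming (consistent with the 0xffff000000000000UL mask in
the code) that a FORMAT-0 APCB describes at most 16 usage and control domains.
The apcb0_domain_mask() helper is hypothetical and only illustrates the constant:

    /*
     * Hedged illustration, not part of this patch: when the FORMAT-0 APCB
     * of guest 2 is widened into the FORMAT-1 shadow APCB, only the 16
     * most significant bits of the AQM and ADM words are kept
     * (domains 0-15), e.g.:
     *
     *   guest 2 AQM word : 0x00a5ffffffffffff
     *   after masking    : 0x00a5000000000000   (domains 8, 10, 13, 15)
     *
     * The APM word needs no such truncation, as both formats describe
     * adapters 0-63 in their first word; it is simply ANDed with the
     * host APM.
     */
    static inline unsigned long apcb0_domain_mask(unsigned long mask)
    {
    	return mask & 0xffff000000000000UL;	/* keep domains 0-15 only */
    }
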
Signed-off-by: Pierre Morel Signed-off-by: Tony Krowiak Message-Id: <20180925231641.4954-23-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/kvm/vsie.c | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 90ae31072991..ca6466304731 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -135,6 +135,22 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) atomic_set(&scb_s->cpuflags, newflags); return 0; } +/* Copy to APCB FORMAT1 from APCB FORMAT0 */ +static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s, + unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h) +{ + struct kvm_s390_apcb0 tmp; + + if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0))) + return -EFAULT; + + apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0]; + apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL; + apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL; + + return 0; + +} /** * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0 @@ -212,11 +228,18 @@ static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s, (unsigned long) &crycb->apcb1, (unsigned long *)&crycb_h->apcb1); case CRYCB_FORMAT1: - if (fmt_h != CRYCB_FORMAT1) - return -EINVAL; - return setup_apcb00(vcpu, (unsigned long *) &crycb_s->apcb0, - (unsigned long) &crycb->apcb0, - (unsigned long *) &crycb_h->apcb0); + switch (fmt_h) { + case CRYCB_FORMAT2: + return setup_apcb10(vcpu, &crycb_s->apcb1, + (unsigned long) &crycb->apcb0, + &crycb_h->apcb1); + case CRYCB_FORMAT1: + return setup_apcb00(vcpu, + (unsigned long *) &crycb_s->apcb0, + (unsigned long) &crycb->apcb0, + (unsigned long *) &crycb_h->apcb0); + } + break; case CRYCB_FORMAT0: if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK)) return -EACCES; From 9ee71f20cb8d90e156c0e00ff9949328f455b06b Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Tue, 25 Sep 2018 19:16:38 -0400 Subject: [PATCH 023/204] KVM: s390: vsie: allow guest FORMAT-0 CRYCB on host FORMAT-2 When the guest schedules a SIE with a FORMAT-0 CRYCB, we are able to schedule it in the host with a FORMAT-2 CRYCB if the host uses FORMAT-2 Signed-off-by: Pierre Morel Signed-off-by: Tony Krowiak Message-Id: <20180925231641.4954-24-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/kvm/vsie.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index ca6466304731..9117891651c6 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -246,7 +246,9 @@ static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s, switch (fmt_h) { case CRYCB_FORMAT2: - return -EINVAL; + return setup_apcb10(vcpu, &crycb_s->apcb1, + (unsigned long) &crycb->apcb0, + &crycb_h->apcb1); case CRYCB_FORMAT1: case CRYCB_FORMAT0: return setup_apcb00(vcpu, From 37940fb0b6a2c4bf101481365c42f250694ffd15 Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:39 -0400 Subject: [PATCH 024/204] KVM: s390: device attrs to enable/disable AP interpretation Introduces two new VM crypto device attributes (KVM_S390_VM_CRYPTO) to enable or disable AP instruction interpretation from userspace via the KVM_SET_DEVICE_ATTR ioctl: * The KVM_S390_VM_CRYPTO_ENABLE_APIE attribute enables hardware interpretation of AP instructions executed on the guest. 
* The KVM_S390_VM_CRYPTO_DISABLE_APIE attribute disables hardware interpretation of AP instructions executed on the guest. In this case the instructions will be intercepted and pass through to the guest. Signed-off-by: Tony Krowiak Reviewed-by: Cornelia Huck Message-Id: <20180925231641.4954-25-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/include/uapi/asm/kvm.h | 2 ++ arch/s390/kvm/kvm-s390.c | 30 +++++++++++++++++++++++++++--- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 9a50f02b9894..16511d97e8dc 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -160,6 +160,8 @@ struct kvm_s390_vm_cpu_subfunc { #define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1 #define KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2 #define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3 +#define KVM_S390_VM_CRYPTO_ENABLE_APIE 4 +#define KVM_S390_VM_CRYPTO_DISABLE_APIE 5 /* kvm attributes for migration mode */ #define KVM_S390_VM_MIGRATION_STOP 0 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index c94ef2d0dbe4..a6230b00c1df 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -856,12 +856,11 @@ void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) { - if (!test_kvm_facility(kvm, 76)) - return -EINVAL; - mutex_lock(&kvm->lock); switch (attr->attr) { case KVM_S390_VM_CRYPTO_ENABLE_AES_KW: + if (!test_kvm_facility(kvm, 76)) + return -EINVAL; get_random_bytes( kvm->arch.crypto.crycb->aes_wrapping_key_mask, sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); @@ -869,6 +868,8 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support"); break; case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: + if (!test_kvm_facility(kvm, 76)) + return -EINVAL; get_random_bytes( kvm->arch.crypto.crycb->dea_wrapping_key_mask, sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); @@ -876,17 +877,35 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support"); break; case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: + if (!test_kvm_facility(kvm, 76)) + return -EINVAL; kvm->arch.crypto.aes_kw = 0; memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support"); break; case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: + if (!test_kvm_facility(kvm, 76)) + return -EINVAL; kvm->arch.crypto.dea_kw = 0; memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support"); break; + case KVM_S390_VM_CRYPTO_ENABLE_APIE: + if (!ap_instructions_available()) { + mutex_unlock(&kvm->lock); + return -EOPNOTSUPP; + } + kvm->arch.crypto.apie = 1; + break; + case KVM_S390_VM_CRYPTO_DISABLE_APIE: + if (!ap_instructions_available()) { + mutex_unlock(&kvm->lock); + return -EOPNOTSUPP; + } + kvm->arch.crypto.apie = 0; + break; default: mutex_unlock(&kvm->lock); return -ENXIO; @@ -1495,6 +1514,10 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: ret = 0; break; + case KVM_S390_VM_CRYPTO_ENABLE_APIE: + case KVM_S390_VM_CRYPTO_DISABLE_APIE: + ret = ap_instructions_available() ? 
0 : -ENXIO; + break; default: ret = -ENXIO; break; @@ -2601,6 +2624,7 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); + vcpu->arch.sie_block->eca &= ~ECA_APIE; if (vcpu->kvm->arch.crypto.apie) vcpu->arch.sie_block->eca |= ECA_APIE; From 112c24d4dc48d8c663e2f6d1c5ecb7473255c175 Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:40 -0400 Subject: [PATCH 025/204] KVM: s390: CPU model support for AP virtualization Introduces two new CPU model facilities to support AP virtualization for KVM guests: 1. AP Query Configuration Information (QCI) facility is installed. This is indicated by setting facilities bit 12 for the guest. The kernel will not enable this facility for the guest if it is not set on the host. If this facility is not set for the KVM guest, then only APQNs with an APQI less than 16 will be used by a Linux guest regardless of the matrix configuration for the virtual machine. This is a limitation of the Linux AP bus. 2. AP Facilities Test facility (APFT) is installed. This is indicated by setting facilities bit 15 for the guest. The kernel will not enable this facility for the guest if it is not set on the host. If this facility is not set for the KVM guest, then no AP devices will be available to the guest regardless of the guest's matrix configuration for the virtual machine. This is a limitation of the Linux AP bus. Signed-off-by: Tony Krowiak Reviewed-by: Christian Borntraeger Reviewed-by: Halil Pasic Reviewed-by: David Hildenbrand Tested-by: Michael Mueller Tested-by: Farhan Ali Acked-by: Cornelia Huck Message-Id: <20180925231641.4954-26-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- arch/s390/tools/gen_facilities.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c index 0c85aedcf9b3..fd788e0f2e5b 100644 --- a/arch/s390/tools/gen_facilities.c +++ b/arch/s390/tools/gen_facilities.c @@ -106,6 +106,8 @@ static struct facility_def facility_defs[] = { .name = "FACILITIES_KVM_CPUMODEL", .bits = (int[]){ + 12, /* AP Query Configuration Information */ + 15, /* AP Facilities Test */ 156, /* etoken facility */ -1 /* END */ } From 492a6be197c0ddf9053ed30722ca11bf6637f011 Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Tue, 25 Sep 2018 19:16:41 -0400 Subject: [PATCH 026/204] s390: doc: detailed specifications for AP virtualization This patch provides documentation describing the AP architecture and design concepts behind the virtualization of AP devices. It also includes an example of how to configure AP devices for exclusive use of KVM guests. Signed-off-by: Tony Krowiak Reviewed-by: Halil Pasic Message-Id: <20180925231641.4954-27-akrowiak@linux.vnet.ibm.com> Signed-off-by: Christian Borntraeger --- Documentation/s390/vfio-ap.txt | 837 +++++++++++++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 838 insertions(+) create mode 100644 Documentation/s390/vfio-ap.txt diff --git a/Documentation/s390/vfio-ap.txt b/Documentation/s390/vfio-ap.txt new file mode 100644 index 000000000000..65167cfe4485 --- /dev/null +++ b/Documentation/s390/vfio-ap.txt @@ -0,0 +1,837 @@ +Introduction: +============ +The Adjunct Processor (AP) facility is an IBM Z cryptographic facility comprised +of three AP instructions and from 1 up to 256 PCIe cryptographic adapter cards. 
+The AP devices provide cryptographic functions to all CPUs assigned to a +linux system running in an IBM Z system LPAR. + +The AP adapter cards are exposed via the AP bus. The motivation for vfio-ap +is to make AP cards available to KVM guests using the VFIO mediated device +framework. This implementation relies considerably on the s390 virtualization +facilities which do most of the hard work of providing direct access to AP +devices. + +AP Architectural Overview: +========================= +To facilitate the comprehension of the design, let's start with some +definitions: + +* AP adapter + + An AP adapter is an IBM Z adapter card that can perform cryptographic + functions. There can be from 0 to 256 adapters assigned to an LPAR. Adapters + assigned to the LPAR in which a linux host is running will be available to + the linux host. Each adapter is identified by a number from 0 to 255; however, + the maximum adapter number is determined by machine model and/or adapter type. + When installed, an AP adapter is accessed by AP instructions executed by any + CPU. + + The AP adapter cards are assigned to a given LPAR via the system's Activation + Profile which can be edited via the HMC. When the linux host system is IPL'd + in the LPAR, the AP bus detects the AP adapter cards assigned to the LPAR and + creates a sysfs device for each assigned adapter. For example, if AP adapters + 4 and 10 (0x0a) are assigned to the LPAR, the AP bus will create the following + sysfs device entries: + + /sys/devices/ap/card04 + /sys/devices/ap/card0a + + Symbolic links to these devices will also be created in the AP bus devices + sub-directory: + + /sys/bus/ap/devices/[card04] + /sys/bus/ap/devices/[card04] + +* AP domain + + An adapter is partitioned into domains. An adapter can hold up to 256 domains + depending upon the adapter type and hardware configuration. A domain is + identified by a number from 0 to 255; however, the maximum domain number is + determined by machine model and/or adapter type.. A domain can be thought of + as a set of hardware registers and memory used for processing AP commands. A + domain can be configured with a secure private key used for clear key + encryption. A domain is classified in one of two ways depending upon how it + may be accessed: + + * Usage domains are domains that are targeted by an AP instruction to + process an AP command. + + * Control domains are domains that are changed by an AP command sent to a + usage domain; for example, to set the secure private key for the control + domain. + + The AP usage and control domains are assigned to a given LPAR via the system's + Activation Profile which can be edited via the HMC. When a linux host system + is IPL'd in the LPAR, the AP bus module detects the AP usage and control + domains assigned to the LPAR. The domain number of each usage domain and + adapter number of each AP adapter are combined to create AP queue devices + (see AP Queue section below). The domain number of each control domain will be + represented in a bitmask and stored in a sysfs file + /sys/bus/ap/ap_control_domain_mask. The bits in the mask, from most to least + significant bit, correspond to domains 0-255. + +* AP Queue + + An AP queue is the means by which an AP command is sent to a usage domain + inside a specific adapter. An AP queue is identified by a tuple + comprised of an AP adapter ID (APID) and an AP queue index (APQI). The + APQI corresponds to a given usage domain number within the adapter. 
This tuple + forms an AP Queue Number (APQN) uniquely identifying an AP queue. AP + instructions include a field containing the APQN to identify the AP queue to + which the AP command is to be sent for processing. + + The AP bus will create a sysfs device for each APQN that can be derived from + the cross product of the AP adapter and usage domain numbers detected when the + AP bus module is loaded. For example, if adapters 4 and 10 (0x0a) and usage + domains 6 and 71 (0x47) are assigned to the LPAR, the AP bus will create the + following sysfs entries: + + /sys/devices/ap/card04/04.0006 + /sys/devices/ap/card04/04.0047 + /sys/devices/ap/card0a/0a.0006 + /sys/devices/ap/card0a/0a.0047 + + The following symbolic links to these devices will be created in the AP bus + devices subdirectory: + + /sys/bus/ap/devices/[04.0006] + /sys/bus/ap/devices/[04.0047] + /sys/bus/ap/devices/[0a.0006] + /sys/bus/ap/devices/[0a.0047] + +* AP Instructions: + + There are three AP instructions: + + * NQAP: to enqueue an AP command-request message to a queue + * DQAP: to dequeue an AP command-reply message from a queue + * PQAP: to administer the queues + + AP instructions identify the domain that is targeted to process the AP + command; this must be one of the usage domains. An AP command may modify a + domain that is not one of the usage domains, but the modified domain + must be one of the control domains. + +AP and SIE: +========== +Let's now take a look at how AP instructions executed on a guest are interpreted +by the hardware. + +A satellite control block called the Crypto Control Block (CRYCB) is attached to +our main hardware virtualization control block. The CRYCB contains three fields +to identify the adapters, usage domains and control domains assigned to the KVM +guest: + +* The AP Mask (APM) field is a bit mask that identifies the AP adapters assigned + to the KVM guest. Each bit in the mask, from left to right (i.e. from most + significant to least significant bit in big endian order), corresponds to + an APID from 0-255. If a bit is set, the corresponding adapter is valid for + use by the KVM guest. + +* The AP Queue Mask (AQM) field is a bit mask identifying the AP usage domains + assigned to the KVM guest. Each bit in the mask, from left to right (i.e. from + most significant to least significant bit in big endian order), corresponds to + an AP queue index (APQI) from 0-255. If a bit is set, the corresponding queue + is valid for use by the KVM guest. + +* The AP Domain Mask field is a bit mask that identifies the AP control domains + assigned to the KVM guest. The ADM bit mask controls which domains can be + changed by an AP command-request message sent to a usage domain from the + guest. Each bit in the mask, from left to right (i.e. from most significant to + least significant bit in big endian order), corresponds to a domain from + 0-255. If a bit is set, the corresponding domain can be modified by an AP + command-request message sent to a usage domain. + +If you recall from the description of an AP Queue, AP instructions include +an APQN to identify the AP queue to which an AP command-request message is to be +sent (NQAP and PQAP instructions), or from which a command-reply message is to +be received (DQAP instruction). The validity of an APQN is defined by the matrix +calculated from the APM and AQM; it is the cross product of all assigned adapter +numbers (APM) with all assigned queue indexes (AQM). 
For example, if adapters 1 +and 2 and usage domains 5 and 6 are assigned to a guest, the APQNs (1,5), (1,6), +(2,5) and (2,6) will be valid for the guest. + +The APQNs can provide secure key functionality - i.e., a private key is stored +on the adapter card for each of its domains - so each APQN must be assigned to +at most one guest or to the linux host. + + Example 1: Valid configuration: + ------------------------------ + Guest1: adapters 1,2 domains 5,6 + Guest2: adapter 1,2 domain 7 + + This is valid because both guests have a unique set of APQNs: + Guest1 has APQNs (1,5), (1,6), (2,5), (2,6); + Guest2 has APQNs (1,7), (2,7) + + Example 2: Valid configuration: + ------------------------------ + Guest1: adapters 1,2 domains 5,6 + Guest2: adapters 3,4 domains 5,6 + + This is also valid because both guests have a unique set of APQNs: + Guest1 has APQNs (1,5), (1,6), (2,5), (2,6); + Guest2 has APQNs (3,5), (3,6), (4,5), (4,6) + + Example 3: Invalid configuration: + -------------------------------- + Guest1: adapters 1,2 domains 5,6 + Guest2: adapter 1 domains 6,7 + + This is an invalid configuration because both guests have access to + APQN (1,6). + +The Design: +=========== +The design introduces three new objects: + +1. AP matrix device +2. VFIO AP device driver (vfio_ap.ko) +3. VFIO AP mediated matrix pass-through device + +The VFIO AP device driver +------------------------- +The VFIO AP (vfio_ap) device driver serves the following purposes: + +1. Provides the interfaces to secure APQNs for exclusive use of KVM guests. + +2. Sets up the VFIO mediated device interfaces to manage a mediated matrix + device and creates the sysfs interfaces for assigning adapters, usage + domains, and control domains comprising the matrix for a KVM guest. + +3. Configures the APM, AQM and ADM in the CRYCB referenced by a KVM guest's + SIE state description to grant the guest access to a matrix of AP devices + +Reserve APQNs for exclusive use of KVM guests +--------------------------------------------- +The following block diagram illustrates the mechanism by which APQNs are +reserved: + + +------------------+ + 7 remove | | + +--------------------> cex4queue driver | + | | | + | +------------------+ + | + | + | +------------------+ +-----------------+ + | 5 register driver | | 3 create | | + | +----------------> Device core +----------> matrix device | + | | | | | | + | | +--------^---------+ +-----------------+ + | | | + | | +-------------------+ + | | +-----------------------------------+ | + | | | 4 register AP driver | | 2 register device + | | | | | ++--------+---+-v---+ +--------+-------+-+ +| | | | +| ap_bus +--------------------- > vfio_ap driver | +| | 8 probe | | ++--------^---------+ +--^--^------------+ +6 edit | | | + apmask | +-----------------------------+ | 9 mdev create + aqmask | | 1 modprobe | ++--------+-----+---+ +----------------+-+ +------------------+ +| | | |8 create | mediated | +| admin | | VFIO device core |---------> matrix | +| + | | | device | ++------+-+---------+ +--------^---------+ +--------^---------+ + | | | | + | | 9 create vfio_ap-passthrough | | + | +------------------------------+ | + +-------------------------------------------------------------+ + 10 assign adapter/domain/control domain + +The process for reserving an AP queue for use by a KVM guest is: + +1. The administrator loads the vfio_ap device driver +2. The vfio-ap driver during its initialization will register a single 'matrix' + device with the device core. 
This will serve as the parent device for + all mediated matrix devices used to configure an AP matrix for a guest. +3. The /sys/devices/vfio_ap/matrix device is created by the device core +4 The vfio_ap device driver will register with the AP bus for AP queue devices + of type 10 and higher (CEX4 and newer). The driver will provide the vfio_ap + driver's probe and remove callback interfaces. Devices older than CEX4 queues + are not supported to simplify the implementation by not needlessly + complicating the design by supporting older devices that will go out of + service in the relatively near future, and for which there are few older + systems around on which to test. +5. The AP bus registers the vfio_ap device driver with the device core +6. The administrator edits the AP adapter and queue masks to reserve AP queues + for use by the vfio_ap device driver. +7. The AP bus removes the AP queues reserved for the vfio_ap driver from the + default zcrypt cex4queue driver. +8. The AP bus probes the vfio_ap device driver to bind the queues reserved for + it. +9. The administrator creates a passthrough type mediated matrix device to be + used by a guest +10 The administrator assigns the adapters, usage domains and control domains + to be exclusively used by a guest. + +Set up the VFIO mediated device interfaces +------------------------------------------ +The VFIO AP device driver utilizes the common interface of the VFIO mediated +device core driver to: +* Register an AP mediated bus driver to add a mediated matrix device to and + remove it from a VFIO group. +* Create and destroy a mediated matrix device +* Add a mediated matrix device to and remove it from the AP mediated bus driver +* Add a mediated matrix device to and remove it from an IOMMU group + +The following high-level block diagram shows the main components and interfaces +of the VFIO AP mediated matrix device driver: + + +-------------+ + | | + | +---------+ | mdev_register_driver() +--------------+ + | | Mdev | +<-----------------------+ | + | | bus | | | vfio_mdev.ko | + | | driver | +----------------------->+ |<-> VFIO user + | +---------+ | probe()/remove() +--------------+ APIs + | | + | MDEV CORE | + | MODULE | + | mdev.ko | + | +---------+ | mdev_register_device() +--------------+ + | |Physical | +<-----------------------+ | + | | device | | | vfio_ap.ko |<-> matrix + | |interface| +----------------------->+ | device + | +---------+ | callback +--------------+ + +-------------+ + +During initialization of the vfio_ap module, the matrix device is registered +with an 'mdev_parent_ops' structure that provides the sysfs attribute +structures, mdev functions and callback interfaces for managing the mediated +matrix device. + +* sysfs attribute structures: + * supported_type_groups + The VFIO mediated device framework supports creation of user-defined + mediated device types. These mediated device types are specified + via the 'supported_type_groups' structure when a device is registered + with the mediated device framework. The registration process creates the + sysfs structures for each mediated device type specified in the + 'mdev_supported_types' sub-directory of the device being registered. Along + with the device type, the sysfs attributes of the mediated device type are + provided. 
+ + The VFIO AP device driver will register one mediated device type for + passthrough devices: + /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough + Only the read-only attributes required by the VFIO mdev framework will + be provided: + ... name + ... device_api + ... available_instances + Where: + * name: specifies the name of the mediated device type + * device_api: the VFIO device API supported by the mediated device type + * available_instances: the number of mediated matrix passthrough devices + that can be created + * mdev_attr_groups + This attribute group identifies the user-defined sysfs attributes of the + mediated device. When a device is registered with the VFIO mediated device + framework, the sysfs attribute files identified in the 'mdev_attr_groups' + structure will be created in the mediated matrix device's directory. The + sysfs attributes for a mediated matrix device are: + * assign_adapter: + * unassign_adapter: + Write-only attributes for assigning/unassigning an AP adapter to/from the + mediated matrix device. To assign/unassign an adapter, the APID of the + adapter is echoed to the respective attribute file. + * assign_domain: + * unassign_domain: + Write-only attributes for assigning/unassigning an AP usage domain to/from + the mediated matrix device. To assign/unassign a domain, the domain + number of the usage domain is echoed to the respective attribute + file. + * matrix: + A read-only file for displaying the APQNs derived from the cross product + of the adapter and domain numbers assigned to the mediated matrix device. + * assign_control_domain: + * unassign_control_domain: + Write-only attributes for assigning/unassigning an AP control domain + to/from the mediated matrix device. To assign/unassign a control domain, + the ID of the domain to be assigned/unassigned is echoed to the respective + attribute file. + * control_domains: + A read-only file for displaying the control domain numbers assigned to the + mediated matrix device. + +* functions: + * create: + allocates the ap_matrix_mdev structure used by the vfio_ap driver to: + * Store the reference to the KVM structure for the guest using the mdev + * Store the AP matrix configuration for the adapters, domains, and control + domains assigned via the corresponding sysfs attribute files + * remove: + deallocates the mediated matrix device's ap_matrix_mdev structure. This will + be allowed only if a running guest is not using the mdev. + +* callback interfaces + * open: + The vfio_ap driver uses this callback to register a + VFIO_GROUP_NOTIFY_SET_KVM notifier callback function for the mdev matrix + device. The open callback is invoked when QEMU connects the VFIO iommu group + for the mdev matrix device to the MDEV bus. Access to the KVM structure used + to configure the KVM guest is provided via this callback. The KVM structure + is used to configure the guest's access to the AP matrix defined via the + mediated matrix device's sysfs attribute files. + * release: + unregisters the VFIO_GROUP_NOTIFY_SET_KVM notifier callback function for the + mdev matrix device and deconfigures the guest's AP matrix. + +Configure the APM, AQM and ADM in the CRYCB: +------------------------------------------- +Configuring the AP matrix for a KVM guest will be performed when the +VFIO_GROUP_NOTIFY_SET_KVM notifier callback is invoked. The notifier +function is called when QEMU connects to KVM. 
The guest's AP matrix is +configured via its CRYCB by: +* Setting the bits in the APM corresponding to the APIDs assigned to the + mediated matrix device via its 'assign_adapter' interface. +* Setting the bits in the AQM corresponding to the domains assigned to the + mediated matrix device via its 'assign_domain' interface. +* Setting the bits in the ADM corresponding to the domain IDs assigned to the + mediated matrix device via its 'assign_control_domain' interface. + +The CPU model features for AP +----------------------------- +The AP stack relies on the presence of the AP instructions as well as two +facilities: the AP Facilities Test (APFT) facility and the AP Query +Configuration Information (QCI) facility. These features/facilities are made +available to a KVM guest via the following CPU model features: + +1. ap: Indicates whether the AP instructions are installed on the guest. This + feature will be enabled by KVM only if the AP instructions are installed + on the host. + +2. apft: Indicates the APFT facility is available on the guest. This facility + can be made available to the guest only if it is available on the host (i.e., + facility bit 15 is set). + +3. apqci: Indicates the AP QCI facility is available on the guest. This facility + can be made available to the guest only if it is available on the host (i.e., + facility bit 12 is set). + +Note: If the user chooses to specify a CPU model different from the 'host' +model to QEMU, the CPU model features and facilities need to be turned on +explicitly; for example: + + /usr/bin/qemu-system-s390x ... -cpu z13,ap=on,apqci=on,apft=on + +A guest can be precluded from using AP features/facilities by turning them off +explicitly; for example: + + /usr/bin/qemu-system-s390x ... -cpu host,ap=off,apqci=off,apft=off + +Note: If the APFT facility is turned off (apft=off) for the guest, the guest +will not see any AP devices. The zcrypt device drivers that register for type 10 +and newer AP devices - i.e., the cex4card and cex4queue device drivers - need +the APFT facility to ascertain the facilities installed on a given AP device. If +the APFT facility is not installed on the guest, then the probe of the zcrypt +device drivers will fail, since only type 10 and newer devices can be configured +for guest use. + +Example: +======== +Let's now provide an example to illustrate how KVM guests may be given +access to AP facilities. For this example, we will show how to configure +three guests such that executing the lszcrypt command on the guests would +look like this: + +Guest1 +------ +CARD.DOMAIN TYPE MODE +------------------------------ +05 CEX5C CCA-Coproc +05.0004 CEX5C CCA-Coproc +05.00ab CEX5C CCA-Coproc +06 CEX5A Accelerator +06.0004 CEX5A Accelerator +06.00ab CEX5A Accelerator + +Guest2 +------ +CARD.DOMAIN TYPE MODE +------------------------------ +05 CEX5A Accelerator +05.0047 CEX5A Accelerator +05.00ff CEX5A Accelerator + +Guest3 +------ +CARD.DOMAIN TYPE MODE +------------------------------ +06 CEX5A Accelerator +06.0047 CEX5A Accelerator +06.00ff CEX5A Accelerator + +These are the steps: + +1. Install the vfio_ap module on the linux host. 
The dependency chain for the + vfio_ap module is: + * iommu + * s390 + * zcrypt + * vfio + * vfio_mdev + * vfio_mdev_device + * KVM + + To build the vfio_ap module, the kernel build must be configured with the + following Kconfig elements selected: + * IOMMU_SUPPORT + * S390 + * ZCRYPT + * S390_AP_IOMMU + * VFIO + * VFIO_MDEV + * VFIO_MDEV_DEVICE + * KVM + + If using 'make menuconfig', select the following to build the vfio_ap module: + -> Device Drivers + -> IOMMU Hardware Support + select S390 AP IOMMU Support + -> VFIO Non-Privileged userspace driver framework + -> Mediated device driver framework + -> VFIO driver for Mediated devices + -> I/O subsystem + -> VFIO support for AP devices + +2. Secure the AP queues to be used by the three guests so that the host cannot + access them. To secure them, there are two sysfs files that specify + bitmasks marking a subset of the APQN range as 'usable by the default AP + queue device drivers' or 'not usable by the default device drivers and thus + available for use by the vfio_ap device driver'. The locations of the sysfs + files containing the masks are: + + /sys/bus/ap/apmask + /sys/bus/ap/aqmask + + The 'apmask' is a 256-bit mask that identifies a set of AP adapter IDs + (APID). Each bit in the mask, from left to right (i.e., from most significant + to least significant bit in big endian order), corresponds to an APID from + 0-255. If a bit is set, the APID is marked as usable only by the default AP + queue device drivers; otherwise, the APID is usable by the vfio_ap + device driver. + + The 'aqmask' is a 256-bit mask that identifies a set of AP queue indexes + (APQI). Each bit in the mask, from left to right (i.e., from most significant + to least significant bit in big endian order), corresponds to an APQI from + 0-255. If a bit is set, the APQI is marked as usable only by the default AP + queue device drivers; otherwise, the APQI is usable by the vfio_ap device + driver. + + Take, for example, the following mask: + + 0x7dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff + + It indicates: + + Device numbers 1, 2, 3, 4, 5, and 7-255 belong to the default drivers' pool, + and device numbers 0 and 6 belong to the vfio_ap device driver's pool. + + The APQN of each AP queue device assigned to the linux host is checked by the + AP bus against the set of APQNs derived from the cross product of APIDs + and APQIs marked as usable only by the default AP queue device drivers. If a + match is detected, only the default AP queue device drivers will be probed; + otherwise, the vfio_ap device driver will be probed. + + By default, the two masks are set to reserve all APQNs for use by the default + AP queue device drivers. There are two ways the default masks can be changed: + + 1. The sysfs mask files can be edited by echoing a string into the + respective sysfs mask file in one of two formats: + + * An absolute hex string starting with 0x - like "0x12345678" - sets + the mask. If the given string is shorter than the mask, it is padded + with 0s on the right; for example, specifying a mask value of 0x41 is + the same as specifying: + + 0x4100000000000000000000000000000000000000000000000000000000000000 + + Keep in mind that the mask reads from left to right (i.e., most + significant to least significant bit in big endian order), so the mask + above identifies device numbers 1 and 7 (01000001). + + If the string is longer than the mask, the operation is terminated with + an error (EINVAL). 
+ + * Individual bits in the mask can be switched on and off by specifying + each bit number to be switched in a comma-separated list. Each bit + number string must be prepended with a plus ('+') or minus ('-') to indicate + the corresponding bit is to be switched on ('+') or off ('-'). Some + valid values are: + + "+0" switches bit 0 on + "-13" switches bit 13 off + "+0x41" switches bit 65 on + "-0xff" switches bit 255 off + + The following example: + +0,-6,+0x47,-0xf0 + + Switches bits 0 and 71 (0x47) on + Switches bits 6 and 240 (0xf0) off + + Note that the bits not specified in the list remain as they were before + the operation. + + 2. The masks can also be changed at boot time via parameters on the kernel + command line like this: + + ap.apmask=0xffff ap.aqmask=0x40 + + This would create the following masks: + + apmask: + 0xffff000000000000000000000000000000000000000000000000000000000000 + + aqmask: + 0x4000000000000000000000000000000000000000000000000000000000000000 + + Resulting in these two pools: + + default drivers pool: adapters 0-15, domain 1 + alternate drivers pool: adapters 16-255, domains 0, 2-255 + + Securing the APQNs for our example: + ---------------------------------- + To secure the AP queues 05.0004, 05.0047, 05.00ab, 05.00ff, 06.0004, 06.0047, + 06.00ab, and 06.00ff for use by the vfio_ap device driver, the corresponding + APQNs can either be removed from the default masks: + + echo -5,-6 > /sys/bus/ap/apmask + + echo -4,-0x47,-0xab,-0xff > /sys/bus/ap/aqmask + + Or the masks can be set as follows: + + echo 0xf9ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff \ + > apmask + + echo 0xf7fffffffffffffffeffffffffffffffffffffffffeffffffffffffffffffffe \ + > aqmask + + This will result in AP queues 05.0004, 05.0047, 05.00ab, 05.00ff, 06.0004, + 06.0047, 06.00ab, and 06.00ff getting bound to the vfio_ap device driver. The + sysfs directory for the vfio_ap device driver will now contain symbolic links + to the AP queue devices bound to it: + + /sys/bus/ap + ... [drivers] + ...... [vfio_ap] + ......... [05.0004] + ......... [05.0047] + ......... [05.00ab] + ......... [05.00ff] + ......... [06.0004] + ......... [06.0047] + ......... [06.00ab] + ......... [06.00ff] + + Keep in mind that only type 10 and newer adapters (i.e., CEX4 and later) + can be bound to the vfio_ap device driver. This restriction keeps the + implementation simple, since older devices will go out of service in the + relatively near future and there are few older systems on which to test. + + The administrator, therefore, must take care to secure only AP queues that + can be bound to the vfio_ap device driver. The device type for a given AP + queue device can be read from the parent card's sysfs directory. For example, + to see the hardware type of the queue 05.0004: + + cat /sys/bus/ap/devices/card05/hwtype + + The hwtype must be 10 or higher (CEX4 or newer) in order to be bound to the + vfio_ap device driver. + +3. 
Create the mediated devices needed to configure the AP matrixes for the + three guests and to provide an interface to the vfio_ap driver for + use by the guests: + + /sys/devices/vfio_ap/matrix/ + --- [mdev_supported_types] + ------ [vfio_ap-passthrough] (passthrough mediated matrix device type) + --------- create + --------- [devices] + + To create the mediated devices for the three guests: + + uuidgen > create + uuidgen > create + uuidgen > create + + or + + echo $uuid1 > create + echo $uuid2 > create + echo $uuid3 > create + + This will create three mediated devices in the [devices] subdirectory named + after the UUID written to the create attribute file. We call them $uuid1, + $uuid2 and $uuid3 and this is the sysfs directory structure after creation: + + /sys/devices/vfio_ap/matrix/ + --- [mdev_supported_types] + ------ [vfio_ap-passthrough] + --------- [devices] + ------------ [$uuid1] + --------------- assign_adapter + --------------- assign_control_domain + --------------- assign_domain + --------------- matrix + --------------- unassign_adapter + --------------- unassign_control_domain + --------------- unassign_domain + + ------------ [$uuid2] + --------------- assign_adapter + --------------- assign_control_domain + --------------- assign_domain + --------------- matrix + --------------- unassign_adapter + ----------------unassign_control_domain + ----------------unassign_domain + + ------------ [$uuid3] + --------------- assign_adapter + --------------- assign_control_domain + --------------- assign_domain + --------------- matrix + --------------- unassign_adapter + ----------------unassign_control_domain + ----------------unassign_domain + +4. The administrator now needs to configure the matrixes for the mediated + devices $uuid1 (for Guest1), $uuid2 (for Guest2) and $uuid3 (for Guest3). + + This is how the matrix is configured for Guest1: + + echo 5 > assign_adapter + echo 6 > assign_adapter + echo 4 > assign_domain + echo 0xab > assign_domain + + Control domains can similarly be assigned using the assign_control_domain + sysfs file. + + If a mistake is made configuring an adapter, domain or control domain, + you can use the unassign_xxx files to unassign the adapter, domain or + control domain. + + To display the matrix configuration for Guest1: + + cat matrix + + This is how the matrix is configured for Guest2: + + echo 5 > assign_adapter + echo 0x47 > assign_domain + echo 0xff > assign_domain + + This is how the matrix is configured for Guest3: + + echo 6 > assign_adapter + echo 0x47 > assign_domain + echo 0xff > assign_domain + + In order to successfully assign an adapter: + + * The adapter number specified must represent a value from 0 up to the + maximum adapter number configured for the system. If an adapter number + higher than the maximum is specified, the operation will terminate with + an error (ENODEV). + + * All APQNs that can be derived from the adapter ID and the IDs of + the previously assigned domains must be bound to the vfio_ap device + driver. If no domains have yet been assigned, then there must be at least + one APQN with the specified APID bound to the vfio_ap driver. If no such + APQNs are bound to the driver, the operation will terminate with an + error (EADDRNOTAVAIL). + + No APQN that can be derived from the adapter ID and the IDs of the + previously assigned domains can be assigned to another mediated matrix + device. If an APQN is assigned to another mediated matrix device, the + operation will terminate with an error (EADDRINUSE). 
+ + In order to successfully assign a domain: + + * The domain number specified must represent a value from 0 up to the + maximum domain number configured for the system. If a domain number + higher than the maximum is specified, the operation will terminate with + an error (ENODEV). + + * All APQNs that can be derived from the domain ID and the IDs of + the previously assigned adapters must be bound to the vfio_ap device + driver. If no adapters have yet been assigned, then there must be at least + one APQN with the specified APQI bound to the vfio_ap driver. If no such + APQNs are bound to the driver, the operation will terminate with an + error (EADDRNOTAVAIL). + + No APQN that can be derived from the domain ID and the IDs of the + previously assigned adapters can be assigned to another mediated matrix + device. If an APQN is assigned to another mediated matrix device, the + operation will terminate with an error (EADDRINUSE). + + In order to successfully assign a control domain, the domain number + specified must represent a value from 0 up to the maximum domain number + configured for the system. If a control domain number higher than the maximum + is specified, the operation will terminate with an error (ENODEV). + +5. Start Guest1: + + /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \ + -device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid1 ... + +6. Start Guest2: + + /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \ + -device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid2 ... + +7. Start Guest3: + + /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \ + -device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid3 ... + +When the guest is shut down, the mediated matrix devices may be removed. + +Using our example again, to remove the mediated matrix device $uuid1: + + /sys/devices/vfio_ap/matrix/ + --- [mdev_supported_types] + ------ [vfio_ap-passthrough] + --------- [devices] + ------------ [$uuid1] + --------------- remove + + + echo 1 > remove + + This will remove all of the mdev matrix device's sysfs structures including + the mdev device itself. To recreate and reconfigure the mdev matrix device, + all of the steps starting with step 3 will have to be performed again. Note + that the remove will fail if a guest using the mdev is still running. + + It is not necessary to remove an mdev matrix device, but one may want to + remove it if no guest will use it during the remaining lifetime of the linux + host. If the mdev matrix device is removed, one may want to also reconfigure + the pool of adapters and queues reserved for use by the default drivers. + +Limitations +=========== +* The KVM/kernel interfaces do not provide a way to prevent restoring an APQN + to the default drivers pool of a queue that is still assigned to a mediated + device in use by a guest. It is incumbent upon the administrator to + ensure there is no mediated device in use by a guest to which the APQN is + assigned; otherwise, the host may be given access to the private data of the + AP queue device, such as a private key configured specifically for the guest. + +* Dynamically modifying the AP matrix for a running guest (which would amount to + hot(un)plug of AP devices for the guest) is currently not supported. + +* Live guest migration is not supported for guests using AP devices. 
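For convenience, the example above can be recapped as the following illustrative shell sketch. It is a summary only, not a prescribed procedure: it assumes the vfio_ap module is loaded, the example adapters and domains (5 and 6; 4, 0x47, 0xab and 0xff) exist on the host, the commands are run as root, and $uuid1 is the UUID created for Guest1; the trailing '...' in the qemu invocation stands for the options elided in the example above.

    # Reserve the example APQNs for the vfio_ap device driver.
    echo -5,-6 > /sys/bus/ap/apmask
    echo -4,-0x47,-0xab,-0xff > /sys/bus/ap/aqmask

    # Create a mediated matrix device for Guest1.
    uuid1=$(uuidgen)
    echo $uuid1 > /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough/create

    # Assign Guest1's adapters and usage domains, then display the matrix.
    cd /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough/devices/$uuid1
    echo 5 > assign_adapter
    echo 6 > assign_adapter
    echo 4 > assign_domain
    echo 0xab > assign_domain
    cat matrix

    # Start Guest1 with the mediated device attached ('...' as in the example).
    /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \
        -device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid1 ...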
diff --git a/MAINTAINERS b/MAINTAINERS index 9cd3997445be..a2e401fe8d4e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12678,6 +12678,7 @@ S: Supported F: drivers/s390/crypto/vfio_ap_drv.c F: drivers/s390/crypto/vfio_ap_private.h F: drivers/s390/crypto/vfio_ap_ops.c +F: Documentation/s390/vfio-ap.txt S390 ZFCP DRIVER M: Steffen Maier From 67d49d52ae502eaea8858fbcb97e3c2891f78da9 Mon Sep 17 00:00:00 2001 From: Collin Walling Date: Fri, 31 Aug 2018 12:51:19 -0400 Subject: [PATCH 027/204] KVM: s390: set host program identifier A host program identifier (HPID) provides information regarding the underlying host environment. A level-2 (VM) guest will have an HPID denoting Linux/KVM, which is set during VCPU setup. A level-3 (VM on a VM) and beyond guest will have an HPID denoting KVM vSIE, which is set for all shadow control blocks, overriding the original value of the HPID. Signed-off-by: Collin Walling Reviewed-by: Janosch Frank Message-Id: <1535734279-10204-4-git-send-email-walling@linux.ibm.com> Reviewed-by: Christian Borntraeger Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 6 +++++- arch/s390/kvm/kvm-s390.c | 2 ++ arch/s390/kvm/vsie.c | 2 ++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 29c940bf8506..47a5a2562aa4 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -237,7 +237,11 @@ struct kvm_s390_sie_block { psw_t gpsw; /* 0x0090 */ __u64 gg14; /* 0x00a0 */ __u64 gg15; /* 0x00a8 */ - __u8 reservedb0[20]; /* 0x00b0 */ + __u8 reservedb0[8]; /* 0x00b0 */ +#define HPID_KVM 0x4 +#define HPID_VSIE 0x5 + __u8 hpid; /* 0x00b8 */ + __u8 reservedb9[11]; /* 0x00b9 */ __u16 extcpuaddr; /* 0x00c4 */ __u16 eic; /* 0x00c6 */ __u32 reservedc8; /* 0x00c8 */ diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f69333fd2fa3..a1e8205d4b6b 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2685,6 +2685,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; + vcpu->arch.sie_block->hpid = HPID_KVM; + kvm_s390_vcpu_crypto_setup(vcpu); return rc; diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index a2b28cd1e3fe..41eab96c5f87 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -383,6 +383,8 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) if (test_kvm_facility(vcpu->kvm, 156)) scb_s->ecd |= scb_o->ecd & ECD_ETOKENF; + scb_s->hpid = HPID_VSIE; + prepare_ibc(vcpu, vsie_page); rc = shadow_crycb(vcpu, vsie_page); out: From af4bf6c3d9b45b62da86d928d3fd2d4ddc8549e6 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 6 Aug 2018 17:54:07 +0200 Subject: [PATCH 028/204] s390/mm: optimize locking without huge pages in gmap_pmd_op_walk() Right now we temporarily take the page table lock in gmap_pmd_op_walk() even though we know we won't need it (if we can never have 1mb pages mapped into the gmap). Let's make this a special case, so gmap_protect_range() and gmap_sync_dirty_log_pmd() will not take the lock when huge pages are not allowed. gmap_protect_range() is called quite frequently for managing shadow page tables in vSIE environments. 
Signed-off-by: David Hildenbrand Reviewed-by: Janosch Frank Message-Id: <20180806155407.15252-1-david@redhat.com> Signed-off-by: Janosch Frank --- arch/s390/mm/gmap.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index bb44990c8212..d4fa0a4514e0 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -905,10 +905,16 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr) pmd_t *pmdp; BUG_ON(gmap_is_shadow(gmap)); - spin_lock(&gmap->guest_table_lock); pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1); + if (!pmdp) + return NULL; - if (!pmdp || pmd_none(*pmdp)) { + /* without huge pages, there is no need to take the table lock */ + if (!gmap->mm->context.allow_gmap_hpage_1m) + return pmd_none(*pmdp) ? NULL : pmdp; + + spin_lock(&gmap->guest_table_lock); + if (pmd_none(*pmdp)) { spin_unlock(&gmap->guest_table_lock); return NULL; } From d2db7773ba864df6b4e19643dfc54838550d8049 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:37 +0100 Subject: [PATCH 029/204] kvm: arm/arm64: Fix stage2_flush_memslot for 4 level page table So far we have only supported 3 level page table with fixed IPA of 40bits, where PUD is folded. With 4 level page tables, we need to check if the PUD entry is valid or not. Fix stage2_flush_memslot() to do this check, before walking down the table. Acked-by: Christoffer Dall Acked-by: Marc Zyngier Reviewed-by: Eric Auger Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- virt/kvm/arm/mmu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index ed162a6c57c5..ee7ce8fa4a12 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -412,7 +412,8 @@ static void stage2_flush_memslot(struct kvm *kvm, pgd = kvm->arch.pgd + stage2_pgd_index(addr); do { next = stage2_pgd_addr_end(addr, end); - stage2_flush_puds(kvm, pgd, addr, next); + if (!stage2_pgd_none(*pgd)) + stage2_flush_puds(kvm, pgd, addr, next); } while (pgd++, addr = next, addr != end); } From 7788a28062aca211979ecd2c334e06e2ef5281cc Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:38 +0100 Subject: [PATCH 030/204] kvm: arm/arm64: Remove spurious WARN_ON On a 4-level page table pgd entry can be empty, unlike a 3-level page table. Remove the spurious WARN_ON() in stage_get_pud(). Acked-by: Christoffer Dall Acked-by: Marc Zyngier Reviewed-by: Eric Auger Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- virt/kvm/arm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index ee7ce8fa4a12..4a285d760ce0 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1005,7 +1005,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache pud_t *pud; pgd = kvm->arch.pgd + stage2_pgd_index(addr); - if (WARN_ON(stage2_pgd_none(*pgd))) { + if (stage2_pgd_none(*pgd)) { if (!cache) return NULL; pud = mmu_memory_cache_alloc(cache); From 9f98ddd6686cc9469fb73b11ddd403271d65cbdf Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:39 +0100 Subject: [PATCH 031/204] kvm: arm64: Add helper for loading the stage2 setting for a VM We load the stage2 context of a guest for different operations, including running the guest and tlb maintenance on behalf of the guest. As of now only the vttbr is private to the guest, but this is about to change with IPA per VM. 
Add a helper to load the stage2 configuration for a VM, which could do the right thing with the future changes. Cc: Christoffer Dall Cc: Marc Zyngier Reviewed-by: Eric Auger Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_hyp.h | 9 +++++++++ arch/arm64/kvm/hyp/switch.c | 2 +- arch/arm64/kvm/hyp/tlb.c | 4 ++-- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 384c34397619..d1bd1e0f14d7 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -155,5 +155,14 @@ void deactivate_traps_vhe_put(void); u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); void __noreturn __hyp_do_panic(unsigned long, ...); +/* + * Must be called from hyp code running at EL2 with an updated VTTBR + * and interrupts disabled. + */ +static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm) +{ + write_sysreg(kvm->arch.vttbr, vttbr_el2); +} + #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index ca46153d7915..9d5ce1a3039a 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -198,7 +198,7 @@ void deactivate_traps_vhe_put(void) static void __hyp_text __activate_vm(struct kvm *kvm) { - write_sysreg(kvm->arch.vttbr, vttbr_el2); + __load_guest_stage2(kvm); } static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index 131c7772703c..4dbd9c69a96d 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -30,7 +30,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm) * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so * let's flip TGE before executing the TLB operation. */ - write_sysreg(kvm->arch.vttbr, vttbr_el2); + __load_guest_stage2(kvm); val = read_sysreg(hcr_el2); val &= ~HCR_TGE; write_sysreg(val, hcr_el2); @@ -39,7 +39,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm) static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm) { - write_sysreg(kvm->arch.vttbr, vttbr_el2); + __load_guest_stage2(kvm); isb(); } From ce00e3cb4fb496683708db6bfce470e5c7710ddc Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:40 +0100 Subject: [PATCH 032/204] arm64: Add a helper for PARange to physical shift conversion On arm64, ID_AA64MMFR0_EL1.PARange encodes the maximum Physical Address range supported by the CPU. Add a helper to decode this to actual physical shift. If we hit an unallocated value, return the maximum range supported by the kernel. This will be used by KVM to set the VTCR_EL2.T0SZ, as it is about to move its place. Having this helper keeps the code movement cleaner. 
Cc: Marc Zyngier Cc: James Morse Cc: Christoffer Dall Acked-by: Catalin Marinas Reviewed-by: Eric Auger Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/cpufeature.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 1717ba1db35d..072cc1c970c2 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -530,6 +530,26 @@ void arm64_set_ssbd_mitigation(bool state); static inline void arm64_set_ssbd_mitigation(bool state) {} #endif +static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange) +{ + switch (parange) { + case 0: return 32; + case 1: return 36; + case 2: return 40; + case 3: return 42; + case 4: return 44; + case 5: return 48; + case 6: return 52; + /* + * A future PE could use a value unknown to the kernel. + * However, by the "D10.1.4 Principles of the ID scheme + * for fields in ID registers", ARM DDI 0487C.a, any new + * value is guaranteed to be higher than what we know already. + * As a safe limit, we return the limit supported by the kernel. + */ + default: return CONFIG_ARM64_PA_BITS; + } +} #endif /* __ASSEMBLY__ */ #endif From b2df44ffba363bd90274340e0adfd8137f5cd878 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:41 +0100 Subject: [PATCH 033/204] kvm: arm64: Clean up VTCR_EL2 initialisation Use the new helper for converting the parange to the physical shift. Also, add the missing definitions for the VTCR_EL2 register fields and use them instead of hard coding numbers. Cc: Marc Zyngier Cc: Christoffer Dall Reviewed-by: Eric Auger Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_arm.h | 3 +++ arch/arm64/kvm/hyp/s2-setup.c | 34 ++++++++------------------------ 2 files changed, 11 insertions(+), 26 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index aa45df752a16..5f807b680a5f 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -107,6 +107,7 @@ #define VTCR_EL2_RES1 (1 << 31) #define VTCR_EL2_HD (1 << 22) #define VTCR_EL2_HA (1 << 21) +#define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT #define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK #define VTCR_EL2_TG0_MASK TCR_TG0_MASK #define VTCR_EL2_TG0_4K TCR_TG0_4K @@ -127,6 +128,8 @@ #define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT) #define VTCR_EL2_VS_16BIT (1 << VTCR_EL2_VS_SHIFT) +#define VTCR_EL2_T0SZ(x) TCR_T0SZ(x) + /* * We configure the Stage-2 page tables to always restrict the IPA space to be * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c index 603e1ee83e89..e1ca672e937a 100644 --- a/arch/arm64/kvm/hyp/s2-setup.c +++ b/arch/arm64/kvm/hyp/s2-setup.c @@ -19,45 +19,27 @@ #include #include #include +#include u32 __hyp_text __init_stage2_translation(void) { u64 val = VTCR_EL2_FLAGS; u64 parange; + u32 phys_shift; u64 tmp; /* * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS - * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while - * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2... + * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, but the + * allocated values are limited to 3bits. 
*/ parange = read_sysreg(id_aa64mmfr0_el1) & 7; if (parange > ID_AA64MMFR0_PARANGE_MAX) parange = ID_AA64MMFR0_PARANGE_MAX; - val |= parange << 16; + val |= parange << VTCR_EL2_PS_SHIFT; /* Compute the actual PARange... */ - switch (parange) { - case 0: - parange = 32; - break; - case 1: - parange = 36; - break; - case 2: - parange = 40; - break; - case 3: - parange = 42; - break; - case 4: - parange = 44; - break; - case 5: - default: - parange = 48; - break; - } + phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange); /* * ... and clamp it to 40 bits, unless we have some braindead @@ -65,7 +47,7 @@ u32 __hyp_text __init_stage2_translation(void) * return that value for the rest of the kernel to decide what * to do. */ - val |= 64 - (parange > 40 ? 40 : parange); + val |= VTCR_EL2_T0SZ(phys_shift > 40 ? 40 : phys_shift); /* * Check the availability of Hardware Access Flag / Dirty Bit @@ -86,5 +68,5 @@ u32 __hyp_text __init_stage2_translation(void) write_sysreg(val, vtcr_el2); - return parange; + return phys_shift; } From 5b6c6742b5350a6fb5c631fb99a6bc046a62739c Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:42 +0100 Subject: [PATCH 034/204] kvm: arm/arm64: Allow arch specific configurations for VM Allow the arch backends to perform VM specific initialisation. This will be later used to handle IPA size configuration and per-VM VTCR configuration on arm64. Cc: Marc Zyngier Cc: Christoffer Dall Reviewed-by: Eric Auger Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 7 +++++++ arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/kvm/reset.c | 7 +++++++ virt/kvm/arm/arm.c | 5 +++-- 4 files changed, 19 insertions(+), 2 deletions(-) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 3ad482d2f1eb..72d46418e1ef 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -354,4 +354,11 @@ static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {} struct kvm *kvm_arch_alloc_vm(void); void kvm_arch_free_vm(struct kvm *kvm); +static inline int kvm_arm_config_vm(struct kvm *kvm, unsigned long type) +{ + if (type) + return -EINVAL; + return 0; +} + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 3d6d7336f871..b04280ae1be0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -513,4 +513,6 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu); struct kvm *kvm_arch_alloc_vm(void); void kvm_arch_free_vm(struct kvm *kvm); +int kvm_arm_config_vm(struct kvm *kvm, unsigned long type); + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index e37c78bbe1ca..b0c07dab5cb3 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -133,3 +133,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset timer */ return kvm_timer_vcpu_reset(vcpu); } + +int kvm_arm_config_vm(struct kvm *kvm, unsigned long type) +{ + if (type) + return -EINVAL; + return 0; +} diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index c92053bc3f96..327d0fd28380 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -120,8 +120,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret, cpu; - if (type) - return -EINVAL; + ret = kvm_arm_config_vm(kvm, type); + if (ret) + return ret; kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran)); if (!kvm->arch.last_vcpu_ran) From 
7665f3a8491b0ed3c6f65c0bc3a5424ea8f87731 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:43 +0100 Subject: [PATCH 035/204] kvm: arm64: Configure VTCR_EL2 per VM Add support for setting the VTCR_EL2 per VM, rather than hard coding a value at boot time per CPU. This would allow us to tune the stage2 page table parameters per VM in the later changes. We compute the VTCR fields based on the system wide sanitised feature registers, except for the hardware management of Access Flags (VTCR_EL2.HA). It is fine to run a system with a mix of CPUs that may or may not update the page table Access Flags. Since the bit is RES0 on CPUs that don't support it, the bit should be ignored on them. Suggested-by: Marc Zyngier Acked-by: Christoffer Dall Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_arm.h | 3 +- arch/arm64/include/asm/kvm_asm.h | 2 - arch/arm64/include/asm/kvm_host.h | 12 ++++-- arch/arm64/include/asm/kvm_hyp.h | 1 + arch/arm64/kvm/hyp/Makefile | 1 - arch/arm64/kvm/hyp/s2-setup.c | 72 ------------------------------- arch/arm64/kvm/reset.c | 35 +++++++++++++++ 7 files changed, 45 insertions(+), 81 deletions(-) delete mode 100644 arch/arm64/kvm/hyp/s2-setup.c diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 5f807b680a5f..14317b3a1820 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -135,8 +135,7 @@ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are * not known to exist and will break with this configuration. * - * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time - * (see hyp-init.S). + * The VTCR_EL2 is configured per VM and is initialised in kvm_arm_config_vm(). * * Note that when using 4K pages, we concatenate two first level page tables * together. With 16K pages, we concatenate 16 first level page tables. 
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 102b5a5c47b6..0b53c72e7591 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -72,8 +72,6 @@ extern void __vgic_v3_init_lrs(void); extern u32 __kvm_get_mdcr_el2(void); -extern u32 __init_stage2_translation(void); - /* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */ #define __hyp_this_cpu_ptr(sym) \ ({ \ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index b04280ae1be0..5ecd457bce7d 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -61,11 +61,13 @@ struct kvm_arch { u64 vmid_gen; u32 vmid; - /* 1-level 2nd stage table, protected by kvm->mmu_lock */ + /* stage2 entry level table */ pgd_t *pgd; /* VTTBR value associated with above pgd and vmid */ u64 vttbr; + /* VTCR_EL2 value for this VM */ + u64 vtcr; /* The last vcpu id that ran on each physical CPU */ int __percpu *last_vcpu_ran; @@ -442,10 +444,12 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, static inline void __cpu_init_stage2(void) { - u32 parange = kvm_call_hyp(__init_stage2_translation); + u32 ps; - WARN_ONCE(parange < 40, - "PARange is %d bits, unsupported configuration!", parange); + /* Sanity check for minimum IPA size support */ + ps = id_aa64mmfr0_parange_to_phys_shift(read_sysreg(id_aa64mmfr0_el1) & 0x7); + WARN_ONCE(ps < 40, + "PARange is %d bits, unsupported configuration!", ps); } /* Guest/host FPSIMD coordination helpers */ diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index d1bd1e0f14d7..23aca66767f9 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -161,6 +161,7 @@ void __noreturn __hyp_do_panic(unsigned long, ...); */ static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm) { + write_sysreg(kvm->arch.vtcr, vtcr_el2); write_sysreg(kvm->arch.vttbr, vttbr_el2); } diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 2fabc2dc1966..82d1904328ad 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -19,7 +19,6 @@ obj-$(CONFIG_KVM_ARM_HOST) += switch.o obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o -obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o # KVM code is run at a different exception code with a different map, so # compiler instrumentation that inserts callbacks or checks into the code may diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c deleted file mode 100644 index e1ca672e937a..000000000000 --- a/arch/arm64/kvm/hyp/s2-setup.c +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2016 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include -#include -#include - -u32 __hyp_text __init_stage2_translation(void) -{ - u64 val = VTCR_EL2_FLAGS; - u64 parange; - u32 phys_shift; - u64 tmp; - - /* - * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS - * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, but the - * allocated values are limited to 3bits. - */ - parange = read_sysreg(id_aa64mmfr0_el1) & 7; - if (parange > ID_AA64MMFR0_PARANGE_MAX) - parange = ID_AA64MMFR0_PARANGE_MAX; - val |= parange << VTCR_EL2_PS_SHIFT; - - /* Compute the actual PARange... */ - phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange); - - /* - * ... and clamp it to 40 bits, unless we have some braindead - * HW that implements less than that. In all cases, we'll - * return that value for the rest of the kernel to decide what - * to do. - */ - val |= VTCR_EL2_T0SZ(phys_shift > 40 ? 40 : phys_shift); - - /* - * Check the availability of Hardware Access Flag / Dirty Bit - * Management in ID_AA64MMFR1_EL1 and enable the feature in VTCR_EL2. - */ - tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_HADBS_SHIFT) & 0xf; - if (tmp) - val |= VTCR_EL2_HA; - - /* - * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS - * bit in VTCR_EL2. - */ - tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf; - val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ? - VTCR_EL2_VS_16BIT : - VTCR_EL2_VS_8BIT; - - write_sysreg(val, vtcr_el2); - - return phys_shift; -} diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index b0c07dab5cb3..616120c4176b 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -26,6 +26,7 @@ #include +#include #include #include #include @@ -134,9 +135,43 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) return kvm_timer_vcpu_reset(vcpu); } +/* + * Configure the VTCR_EL2 for this VM. The VTCR value is common + * across all the physical CPUs on the system. We use system wide + * sanitised values to fill in different fields, except for Hardware + * Management of Access Flags. HA Flag is set unconditionally on + * all CPUs, as it is safe to run with or without the feature and + * the bit is RES0 on CPUs that don't support it. + */ int kvm_arm_config_vm(struct kvm *kvm, unsigned long type) { + u64 vtcr = VTCR_EL2_FLAGS; + u32 parange, phys_shift; + if (type) return -EINVAL; + + parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7; + if (parange > ID_AA64MMFR0_PARANGE_MAX) + parange = ID_AA64MMFR0_PARANGE_MAX; + vtcr |= parange << VTCR_EL2_PS_SHIFT; + + phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange); + if (phys_shift > KVM_PHYS_SHIFT) + phys_shift = KVM_PHYS_SHIFT; + vtcr |= VTCR_EL2_T0SZ(phys_shift); + + /* + * Enable the Hardware Access Flag management, unconditionally + * on all CPUs. The features is RES0 on CPUs without the support + * and must be ignored by the CPUs. + */ + vtcr |= VTCR_EL2_HA; + + /* Set the vmid bits */ + vtcr |= (kvm_get_vmid_bits() == 16) ? + VTCR_EL2_VS_16BIT : + VTCR_EL2_VS_8BIT; + kvm->arch.vtcr = vtcr; return 0; } From e55cac5bf2a9cc86b57a9533d6b9e5005bc19b5c Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:44 +0100 Subject: [PATCH 036/204] kvm: arm/arm64: Prepare for VM specific stage2 translations Right now the stage2 page table for a VM is hard coded, assuming an IPA of 40bits. As we are about to add support for per VM IPA, prepare the stage2 page table helpers to accept the kvm instance to make the right decision for the VM. No functional changes. 
Adds stage2_pgd_size(kvm) to replace S2_PGD_SIZE. Also, moves some of the definitions in arm32 to align with the arm64. Also drop the _AC() specifier constants wherever possible. Cc: Christoffer Dall Acked-by: Marc Zyngier Reviewed-by: Eric Auger Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_arm.h | 3 +- arch/arm/include/asm/kvm_mmu.h | 13 +- arch/arm/include/asm/stage2_pgtable.h | 50 +++++--- arch/arm64/include/asm/kvm_mmu.h | 7 +- arch/arm64/include/asm/stage2_pgtable-nopmd.h | 18 +-- arch/arm64/include/asm/stage2_pgtable-nopud.h | 16 +-- arch/arm64/include/asm/stage2_pgtable.h | 58 +++++---- virt/kvm/arm/arm.c | 2 +- virt/kvm/arm/mmu.c | 119 +++++++++--------- virt/kvm/arm/vgic/vgic-kvm-device.c | 2 +- 10 files changed, 156 insertions(+), 132 deletions(-) diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 3ab8b3781bfe..c3f1f9b304b7 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -133,8 +133,7 @@ * space. */ #define KVM_PHYS_SHIFT (40) -#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT) -#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL)) + #define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30)) /* Virtualization Translation Control Register (VTCR) bits */ diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 265ea9cf7df7..12ae5fbbcf01 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -35,16 +35,12 @@ addr; \ }) -/* - * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels. - */ -#define KVM_MMU_CACHE_MIN_PAGES 2 - #ifndef __ASSEMBLY__ #include #include #include +#include #include #include #include @@ -52,6 +48,13 @@ /* Ensure compatibility with arm64 */ #define VA_BITS 32 +#define kvm_phys_shift(kvm) KVM_PHYS_SHIFT +#define kvm_phys_size(kvm) (1ULL << kvm_phys_shift(kvm)) +#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - 1ULL) +#define kvm_vttbr_baddr_mask(kvm) VTTBR_BADDR_MASK + +#define stage2_pgd_size(kvm) (PTRS_PER_S2_PGD * sizeof(pgd_t)) + int create_hyp_mappings(void *from, void *to, pgprot_t prot); int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, void __iomem **kaddr, diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h index 460d616bb2d6..f6a7ea805232 100644 --- a/arch/arm/include/asm/stage2_pgtable.h +++ b/arch/arm/include/asm/stage2_pgtable.h @@ -19,43 +19,53 @@ #ifndef __ARM_S2_PGTABLE_H_ #define __ARM_S2_PGTABLE_H_ -#define stage2_pgd_none(pgd) pgd_none(pgd) -#define stage2_pgd_clear(pgd) pgd_clear(pgd) -#define stage2_pgd_present(pgd) pgd_present(pgd) -#define stage2_pgd_populate(pgd, pud) pgd_populate(NULL, pgd, pud) -#define stage2_pud_offset(pgd, address) pud_offset(pgd, address) -#define stage2_pud_free(pud) pud_free(NULL, pud) +/* + * kvm_mmu_cache_min_pages() is the number of pages required + * to install a stage-2 translation. We pre-allocate the entry + * level table at VM creation. Since we have a 3 level page-table, + * we need only two pages to add a new mapping. 
+ */ +#define kvm_mmu_cache_min_pages(kvm) 2 -#define stage2_pud_none(pud) pud_none(pud) -#define stage2_pud_clear(pud) pud_clear(pud) -#define stage2_pud_present(pud) pud_present(pud) -#define stage2_pud_populate(pud, pmd) pud_populate(NULL, pud, pmd) -#define stage2_pmd_offset(pud, address) pmd_offset(pud, address) -#define stage2_pmd_free(pmd) pmd_free(NULL, pmd) +#define stage2_pgd_none(kvm, pgd) pgd_none(pgd) +#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd) +#define stage2_pgd_present(kvm, pgd) pgd_present(pgd) +#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud) +#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address) +#define stage2_pud_free(kvm, pud) pud_free(NULL, pud) -#define stage2_pud_huge(pud) pud_huge(pud) +#define stage2_pud_none(kvm, pud) pud_none(pud) +#define stage2_pud_clear(kvm, pud) pud_clear(pud) +#define stage2_pud_present(kvm, pud) pud_present(pud) +#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd) +#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address) +#define stage2_pmd_free(kvm, pmd) pmd_free(NULL, pmd) + +#define stage2_pud_huge(kvm, pud) pud_huge(pud) /* Open coded p*d_addr_end that can deal with 64bit addresses */ -static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end) +static inline phys_addr_t +stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK; return (boundary - 1 < end - 1) ? boundary : end; } -#define stage2_pud_addr_end(addr, end) (end) +#define stage2_pud_addr_end(kvm, addr, end) (end) -static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end) +static inline phys_addr_t +stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK; return (boundary - 1 < end - 1) ? boundary : end; } -#define stage2_pgd_index(addr) pgd_index(addr) +#define stage2_pgd_index(kvm, addr) pgd_index(addr) -#define stage2_pte_table_empty(ptep) kvm_page_empty(ptep) -#define stage2_pmd_table_empty(pmdp) kvm_page_empty(pmdp) -#define stage2_pud_table_empty(pudp) false +#define stage2_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) +#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) +#define stage2_pud_table_empty(kvm, pudp) false #endif /* __ARM_S2_PGTABLE_H_ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index d6fff7de5539..3a032066e52c 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -141,8 +141,11 @@ static inline unsigned long __kern_hyp_va(unsigned long v) * We currently only support a 40bit IPA. 
*/ #define KVM_PHYS_SHIFT (40) -#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) -#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) + +#define kvm_phys_shift(kvm) KVM_PHYS_SHIFT +#define kvm_phys_size(kvm) (_AC(1, ULL) << kvm_phys_shift(kvm)) +#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - _AC(1, ULL)) +#define kvm_vttbr_baddr_mask(kvm) VTTBR_BADDR_MASK #include diff --git a/arch/arm64/include/asm/stage2_pgtable-nopmd.h b/arch/arm64/include/asm/stage2_pgtable-nopmd.h index 2656a0fd05a6..0280dedbf75f 100644 --- a/arch/arm64/include/asm/stage2_pgtable-nopmd.h +++ b/arch/arm64/include/asm/stage2_pgtable-nopmd.h @@ -26,17 +26,17 @@ #define S2_PMD_SIZE (1UL << S2_PMD_SHIFT) #define S2_PMD_MASK (~(S2_PMD_SIZE-1)) -#define stage2_pud_none(pud) (0) -#define stage2_pud_present(pud) (1) -#define stage2_pud_clear(pud) do { } while (0) -#define stage2_pud_populate(pud, pmd) do { } while (0) -#define stage2_pmd_offset(pud, address) ((pmd_t *)(pud)) +#define stage2_pud_none(kvm, pud) (0) +#define stage2_pud_present(kvm, pud) (1) +#define stage2_pud_clear(kvm, pud) do { } while (0) +#define stage2_pud_populate(kvm, pud, pmd) do { } while (0) +#define stage2_pmd_offset(kvm, pud, address) ((pmd_t *)(pud)) -#define stage2_pmd_free(pmd) do { } while (0) +#define stage2_pmd_free(kvm, pmd) do { } while (0) -#define stage2_pmd_addr_end(addr, end) (end) +#define stage2_pmd_addr_end(kvm, addr, end) (end) -#define stage2_pud_huge(pud) (0) -#define stage2_pmd_table_empty(pmdp) (0) +#define stage2_pud_huge(kvm, pud) (0) +#define stage2_pmd_table_empty(kvm, pmdp) (0) #endif diff --git a/arch/arm64/include/asm/stage2_pgtable-nopud.h b/arch/arm64/include/asm/stage2_pgtable-nopud.h index 5ee87b54ebf3..cd6304e203be 100644 --- a/arch/arm64/include/asm/stage2_pgtable-nopud.h +++ b/arch/arm64/include/asm/stage2_pgtable-nopud.h @@ -24,16 +24,16 @@ #define S2_PUD_SIZE (_AC(1, UL) << S2_PUD_SHIFT) #define S2_PUD_MASK (~(S2_PUD_SIZE-1)) -#define stage2_pgd_none(pgd) (0) -#define stage2_pgd_present(pgd) (1) -#define stage2_pgd_clear(pgd) do { } while (0) -#define stage2_pgd_populate(pgd, pud) do { } while (0) +#define stage2_pgd_none(kvm, pgd) (0) +#define stage2_pgd_present(kvm, pgd) (1) +#define stage2_pgd_clear(kvm, pgd) do { } while (0) +#define stage2_pgd_populate(kvm, pgd, pud) do { } while (0) -#define stage2_pud_offset(pgd, address) ((pud_t *)(pgd)) +#define stage2_pud_offset(kvm, pgd, address) ((pud_t *)(pgd)) -#define stage2_pud_free(x) do { } while (0) +#define stage2_pud_free(kvm, x) do { } while (0) -#define stage2_pud_addr_end(addr, end) (end) -#define stage2_pud_table_empty(pmdp) (0) +#define stage2_pud_addr_end(kvm, addr, end) (end) +#define stage2_pud_table_empty(kvm, pmdp) (0) #endif diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h index 8b68099348e5..11891612be14 100644 --- a/arch/arm64/include/asm/stage2_pgtable.h +++ b/arch/arm64/include/asm/stage2_pgtable.h @@ -55,7 +55,7 @@ /* S2_PGDIR_SHIFT is the size mapped by top-level stage2 entry */ #define S2_PGDIR_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - STAGE2_PGTABLE_LEVELS) -#define S2_PGDIR_SIZE (_AC(1, UL) << S2_PGDIR_SHIFT) +#define S2_PGDIR_SIZE (1UL << S2_PGDIR_SHIFT) #define S2_PGDIR_MASK (~(S2_PGDIR_SIZE - 1)) /* @@ -65,28 +65,30 @@ #define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - S2_PGDIR_SHIFT)) /* - * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation - * levels in addition to the PGD. + * kvm_mmmu_cache_min_pages() is the number of pages required to install + * a stage-2 translation. 
We pre-allocate the entry level page table at + * the VM creation. */ -#define KVM_MMU_CACHE_MIN_PAGES (STAGE2_PGTABLE_LEVELS - 1) +#define kvm_mmu_cache_min_pages(kvm) (STAGE2_PGTABLE_LEVELS - 1) #if STAGE2_PGTABLE_LEVELS > 3 #define S2_PUD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(1) -#define S2_PUD_SIZE (_AC(1, UL) << S2_PUD_SHIFT) +#define S2_PUD_SIZE (1UL << S2_PUD_SHIFT) #define S2_PUD_MASK (~(S2_PUD_SIZE - 1)) -#define stage2_pgd_none(pgd) pgd_none(pgd) -#define stage2_pgd_clear(pgd) pgd_clear(pgd) -#define stage2_pgd_present(pgd) pgd_present(pgd) -#define stage2_pgd_populate(pgd, pud) pgd_populate(NULL, pgd, pud) -#define stage2_pud_offset(pgd, address) pud_offset(pgd, address) -#define stage2_pud_free(pud) pud_free(NULL, pud) +#define stage2_pgd_none(kvm, pgd) pgd_none(pgd) +#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd) +#define stage2_pgd_present(kvm, pgd) pgd_present(pgd) +#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud) +#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address) +#define stage2_pud_free(kvm, pud) pud_free(NULL, pud) -#define stage2_pud_table_empty(pudp) kvm_page_empty(pudp) +#define stage2_pud_table_empty(kvm, pudp) kvm_page_empty(pudp) -static inline phys_addr_t stage2_pud_addr_end(phys_addr_t addr, phys_addr_t end) +static inline phys_addr_t +stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK; @@ -99,20 +101,21 @@ static inline phys_addr_t stage2_pud_addr_end(phys_addr_t addr, phys_addr_t end) #if STAGE2_PGTABLE_LEVELS > 2 #define S2_PMD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(2) -#define S2_PMD_SIZE (_AC(1, UL) << S2_PMD_SHIFT) +#define S2_PMD_SIZE (1UL << S2_PMD_SHIFT) #define S2_PMD_MASK (~(S2_PMD_SIZE - 1)) -#define stage2_pud_none(pud) pud_none(pud) -#define stage2_pud_clear(pud) pud_clear(pud) -#define stage2_pud_present(pud) pud_present(pud) -#define stage2_pud_populate(pud, pmd) pud_populate(NULL, pud, pmd) -#define stage2_pmd_offset(pud, address) pmd_offset(pud, address) -#define stage2_pmd_free(pmd) pmd_free(NULL, pmd) +#define stage2_pud_none(kvm, pud) pud_none(pud) +#define stage2_pud_clear(kvm, pud) pud_clear(pud) +#define stage2_pud_present(kvm, pud) pud_present(pud) +#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd) +#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address) +#define stage2_pmd_free(kvm, pmd) pmd_free(NULL, pmd) -#define stage2_pud_huge(pud) pud_huge(pud) -#define stage2_pmd_table_empty(pmdp) kvm_page_empty(pmdp) +#define stage2_pud_huge(kvm, pud) pud_huge(pud) +#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) -static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end) +static inline phys_addr_t +stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK; @@ -121,7 +124,7 @@ static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end) #endif /* STAGE2_PGTABLE_LEVELS > 2 */ -#define stage2_pte_table_empty(ptep) kvm_page_empty(ptep) +#define stage2_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) #if STAGE2_PGTABLE_LEVELS == 2 #include @@ -129,10 +132,13 @@ static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end) #include #endif +#define stage2_pgd_size(kvm) (PTRS_PER_S2_PGD * sizeof(pgd_t)) -#define stage2_pgd_index(addr) (((addr) >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)) +#define stage2_pgd_index(kvm, addr) \ + (((addr) >> S2_PGDIR_SHIFT) 
& (PTRS_PER_S2_PGD - 1)) -static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end) +static inline phys_addr_t +stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { phys_addr_t boundary = (addr + S2_PGDIR_SIZE) & S2_PGDIR_MASK; diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 327d0fd28380..43e716bc3f08 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -545,7 +545,7 @@ static void update_vttbr(struct kvm *kvm) /* update vttbr to be used with the new vmid */ pgd_phys = virt_to_phys(kvm->arch.pgd); - BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK); + BUG_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)); vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits); kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid; diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 4a285d760ce0..7e477b3cae5b 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -45,7 +45,6 @@ static phys_addr_t hyp_idmap_vector; static unsigned long io_map_base; -#define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t)) #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t)) #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0) @@ -150,20 +149,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr) { - pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL); - stage2_pgd_clear(pgd); + pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL); + stage2_pgd_clear(kvm, pgd); kvm_tlb_flush_vmid_ipa(kvm, addr); - stage2_pud_free(pud_table); + stage2_pud_free(kvm, pud_table); put_page(virt_to_page(pgd)); } static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) { - pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0); - VM_BUG_ON(stage2_pud_huge(*pud)); - stage2_pud_clear(pud); + pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0); + VM_BUG_ON(stage2_pud_huge(kvm, *pud)); + stage2_pud_clear(kvm, pud); kvm_tlb_flush_vmid_ipa(kvm, addr); - stage2_pmd_free(pmd_table); + stage2_pmd_free(kvm, pmd_table); put_page(virt_to_page(pud)); } @@ -252,7 +251,7 @@ static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd, } } while (pte++, addr += PAGE_SIZE, addr != end); - if (stage2_pte_table_empty(start_pte)) + if (stage2_pte_table_empty(kvm, start_pte)) clear_stage2_pmd_entry(kvm, pmd, start_addr); } @@ -262,9 +261,9 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud, phys_addr_t next, start_addr = addr; pmd_t *pmd, *start_pmd; - start_pmd = pmd = stage2_pmd_offset(pud, addr); + start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr); do { - next = stage2_pmd_addr_end(addr, end); + next = stage2_pmd_addr_end(kvm, addr, end); if (!pmd_none(*pmd)) { if (pmd_thp_or_huge(*pmd)) { pmd_t old_pmd = *pmd; @@ -281,7 +280,7 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud, } } while (pmd++, addr = next, addr != end); - if (stage2_pmd_table_empty(start_pmd)) + if (stage2_pmd_table_empty(kvm, start_pmd)) clear_stage2_pud_entry(kvm, pud, start_addr); } @@ -291,14 +290,14 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd, phys_addr_t next, start_addr = addr; pud_t *pud, *start_pud; - start_pud = pud = stage2_pud_offset(pgd, addr); + start_pud = pud = stage2_pud_offset(kvm, pgd, addr); do { - next = stage2_pud_addr_end(addr, end); - if (!stage2_pud_none(*pud)) { - if (stage2_pud_huge(*pud)) { + next = stage2_pud_addr_end(kvm, addr, end); + if (!stage2_pud_none(kvm, *pud)) { + if 
(stage2_pud_huge(kvm, *pud)) { pud_t old_pud = *pud; - stage2_pud_clear(pud); + stage2_pud_clear(kvm, pud); kvm_tlb_flush_vmid_ipa(kvm, addr); kvm_flush_dcache_pud(old_pud); put_page(virt_to_page(pud)); @@ -308,7 +307,7 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd, } } while (pud++, addr = next, addr != end); - if (stage2_pud_table_empty(start_pud)) + if (stage2_pud_table_empty(kvm, start_pud)) clear_stage2_pgd_entry(kvm, pgd, start_addr); } @@ -332,7 +331,7 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) assert_spin_locked(&kvm->mmu_lock); WARN_ON(size & ~PAGE_MASK); - pgd = kvm->arch.pgd + stage2_pgd_index(addr); + pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr); do { /* * Make sure the page table is still active, as another thread @@ -341,8 +340,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) */ if (!READ_ONCE(kvm->arch.pgd)) break; - next = stage2_pgd_addr_end(addr, end); - if (!stage2_pgd_none(*pgd)) + next = stage2_pgd_addr_end(kvm, addr, end); + if (!stage2_pgd_none(kvm, *pgd)) unmap_stage2_puds(kvm, pgd, addr, next); /* * If the range is too large, release the kvm->mmu_lock @@ -371,9 +370,9 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud, pmd_t *pmd; phys_addr_t next; - pmd = stage2_pmd_offset(pud, addr); + pmd = stage2_pmd_offset(kvm, pud, addr); do { - next = stage2_pmd_addr_end(addr, end); + next = stage2_pmd_addr_end(kvm, addr, end); if (!pmd_none(*pmd)) { if (pmd_thp_or_huge(*pmd)) kvm_flush_dcache_pmd(*pmd); @@ -389,11 +388,11 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd, pud_t *pud; phys_addr_t next; - pud = stage2_pud_offset(pgd, addr); + pud = stage2_pud_offset(kvm, pgd, addr); do { - next = stage2_pud_addr_end(addr, end); - if (!stage2_pud_none(*pud)) { - if (stage2_pud_huge(*pud)) + next = stage2_pud_addr_end(kvm, addr, end); + if (!stage2_pud_none(kvm, *pud)) { + if (stage2_pud_huge(kvm, *pud)) kvm_flush_dcache_pud(*pud); else stage2_flush_pmds(kvm, pud, addr, next); @@ -409,10 +408,10 @@ static void stage2_flush_memslot(struct kvm *kvm, phys_addr_t next; pgd_t *pgd; - pgd = kvm->arch.pgd + stage2_pgd_index(addr); + pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr); do { - next = stage2_pgd_addr_end(addr, end); - if (!stage2_pgd_none(*pgd)) + next = stage2_pgd_addr_end(kvm, addr, end); + if (!stage2_pgd_none(kvm, *pgd)) stage2_flush_puds(kvm, pgd, addr, next); } while (pgd++, addr = next, addr != end); } @@ -898,7 +897,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) } /* Allocate the HW PGD, making sure that each page gets its own refcount */ - pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO); + pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO); if (!pgd) return -ENOMEM; @@ -987,7 +986,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm) spin_lock(&kvm->mmu_lock); if (kvm->arch.pgd) { - unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); + unmap_stage2_range(kvm, 0, kvm_phys_size(kvm)); pgd = READ_ONCE(kvm->arch.pgd); kvm->arch.pgd = NULL; } @@ -995,7 +994,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm) /* Free the HW pgd, one page at a time */ if (pgd) - free_pages_exact(pgd, S2_PGD_SIZE); + free_pages_exact(pgd, stage2_pgd_size(kvm)); } static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, @@ -1004,16 +1003,16 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache pgd_t *pgd; pud_t *pud; - pgd = kvm->arch.pgd + stage2_pgd_index(addr); - if (stage2_pgd_none(*pgd)) { + pgd = 
kvm->arch.pgd + stage2_pgd_index(kvm, addr); + if (stage2_pgd_none(kvm, *pgd)) { if (!cache) return NULL; pud = mmu_memory_cache_alloc(cache); - stage2_pgd_populate(pgd, pud); + stage2_pgd_populate(kvm, pgd, pud); get_page(virt_to_page(pgd)); } - return stage2_pud_offset(pgd, addr); + return stage2_pud_offset(kvm, pgd, addr); } static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, @@ -1026,15 +1025,15 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache if (!pud) return NULL; - if (stage2_pud_none(*pud)) { + if (stage2_pud_none(kvm, *pud)) { if (!cache) return NULL; pmd = mmu_memory_cache_alloc(cache); - stage2_pud_populate(pud, pmd); + stage2_pud_populate(kvm, pud, pmd); get_page(virt_to_page(pud)); } - return stage2_pmd_offset(pud, addr); + return stage2_pmd_offset(kvm, pud, addr); } static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache @@ -1208,8 +1207,9 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, if (writable) pte = kvm_s2pte_mkwrite(pte); - ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES, - KVM_NR_MEM_OBJS); + ret = mmu_topup_memory_cache(&cache, + kvm_mmu_cache_min_pages(kvm), + KVM_NR_MEM_OBJS); if (ret) goto out; spin_lock(&kvm->mmu_lock); @@ -1297,19 +1297,21 @@ static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end) /** * stage2_wp_pmds - write protect PUD range + * kvm: kvm instance for the VM * @pud: pointer to pud entry * @addr: range start address * @end: range end address */ -static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end) +static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud, + phys_addr_t addr, phys_addr_t end) { pmd_t *pmd; phys_addr_t next; - pmd = stage2_pmd_offset(pud, addr); + pmd = stage2_pmd_offset(kvm, pud, addr); do { - next = stage2_pmd_addr_end(addr, end); + next = stage2_pmd_addr_end(kvm, addr, end); if (!pmd_none(*pmd)) { if (pmd_thp_or_huge(*pmd)) { if (!kvm_s2pmd_readonly(pmd)) @@ -1329,18 +1331,19 @@ static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end) * * Process PUD entries, for a huge PUD we cause a panic. 
*/ -static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end) +static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd, + phys_addr_t addr, phys_addr_t end) { pud_t *pud; phys_addr_t next; - pud = stage2_pud_offset(pgd, addr); + pud = stage2_pud_offset(kvm, pgd, addr); do { - next = stage2_pud_addr_end(addr, end); - if (!stage2_pud_none(*pud)) { + next = stage2_pud_addr_end(kvm, addr, end); + if (!stage2_pud_none(kvm, *pud)) { /* TODO:PUD not supported, revisit later if supported */ - BUG_ON(stage2_pud_huge(*pud)); - stage2_wp_pmds(pud, addr, next); + BUG_ON(stage2_pud_huge(kvm, *pud)); + stage2_wp_pmds(kvm, pud, addr, next); } } while (pud++, addr = next, addr != end); } @@ -1356,7 +1359,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) pgd_t *pgd; phys_addr_t next; - pgd = kvm->arch.pgd + stage2_pgd_index(addr); + pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr); do { /* * Release kvm_mmu_lock periodically if the memory region is @@ -1370,9 +1373,9 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) cond_resched_lock(&kvm->mmu_lock); if (!READ_ONCE(kvm->arch.pgd)) break; - next = stage2_pgd_addr_end(addr, end); - if (stage2_pgd_present(*pgd)) - stage2_wp_puds(pgd, addr, next); + next = stage2_pgd_addr_end(kvm, addr, end); + if (stage2_pgd_present(kvm, *pgd)) + stage2_wp_puds(kvm, pgd, addr, next); } while (pgd++, addr = next, addr != end); } @@ -1521,7 +1524,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, up_read(¤t->mm->mmap_sem); /* We need minimum second+third level pages */ - ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES, + ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm), KVM_NR_MEM_OBJS); if (ret) return ret; @@ -1764,7 +1767,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) } /* Userspace should not be able to register out-of-bounds IPAs */ - VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE); + VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm)); if (fault_status == FSC_ACCESS) { handle_access_fault(vcpu, fault_ipa); @@ -2063,7 +2066,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, * space addressable by the KVM guest IPA space. */ if (memslot->base_gfn + memslot->npages >= - (KVM_PHYS_SIZE >> PAGE_SHIFT)) + (kvm_phys_size(kvm) >> PAGE_SHIFT)) return -EFAULT; down_read(¤t->mm->mmap_sem); diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c index 6ada2432e37c..114dce9f4bf5 100644 --- a/virt/kvm/arm/vgic/vgic-kvm-device.c +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c @@ -25,7 +25,7 @@ int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr, phys_addr_t addr, phys_addr_t alignment) { - if (addr & ~KVM_PHYS_MASK) + if (addr & ~kvm_phys_mask(kvm)) return -E2BIG; if (!IS_ALIGNED(addr, alignment)) From 865b30cdd9b286820aef44393bafc4c0ccee53d0 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:45 +0100 Subject: [PATCH 037/204] kvm: arm64: Prepare for dynamic stage2 page table layout Our stage2 page table helpers are statically defined based on the fixed IPA of 40bits and the host page size. As we are about to add support for configurable IPA size for VMs, we need to make the page table checks for each VM. This patch prepares the stage2 helpers to make the transition to a VM dependent table layout easier. Instead of statically defining the table helpers based on the page table levels, we now check the page table levels in the helpers to do the right thing. 
In effect, it simply converts the macros to static inline functions. Cc: Eric Auger Cc: Marc Zyngier Cc: Christoffer Dall Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_mmu.h | 12 +- arch/arm64/include/asm/stage2_pgtable-nopmd.h | 42 ----- arch/arm64/include/asm/stage2_pgtable-nopud.h | 39 ---- arch/arm64/include/asm/stage2_pgtable.h | 169 ++++++++++++++---- 4 files changed, 137 insertions(+), 125 deletions(-) delete mode 100644 arch/arm64/include/asm/stage2_pgtable-nopmd.h delete mode 100644 arch/arm64/include/asm/stage2_pgtable-nopud.h diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 3a032066e52c..7342d2c51773 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -147,6 +147,12 @@ static inline unsigned long __kern_hyp_va(unsigned long v) #define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - _AC(1, ULL)) #define kvm_vttbr_baddr_mask(kvm) VTTBR_BADDR_MASK +static inline bool kvm_page_empty(void *ptr) +{ + struct page *ptr_page = virt_to_page(ptr); + return page_count(ptr_page) == 1; +} + #include int create_hyp_mappings(void *from, void *to, pgprot_t prot); @@ -241,12 +247,6 @@ static inline bool kvm_s2pmd_exec(pmd_t *pmdp) return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN); } -static inline bool kvm_page_empty(void *ptr) -{ - struct page *ptr_page = virt_to_page(ptr); - return page_count(ptr_page) == 1; -} - #define hyp_pte_table_empty(ptep) kvm_page_empty(ptep) #ifdef __PAGETABLE_PMD_FOLDED diff --git a/arch/arm64/include/asm/stage2_pgtable-nopmd.h b/arch/arm64/include/asm/stage2_pgtable-nopmd.h deleted file mode 100644 index 0280dedbf75f..000000000000 --- a/arch/arm64/include/asm/stage2_pgtable-nopmd.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (C) 2016 - ARM Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#ifndef __ARM64_S2_PGTABLE_NOPMD_H_ -#define __ARM64_S2_PGTABLE_NOPMD_H_ - -#include - -#define __S2_PGTABLE_PMD_FOLDED - -#define S2_PMD_SHIFT S2_PUD_SHIFT -#define S2_PTRS_PER_PMD 1 -#define S2_PMD_SIZE (1UL << S2_PMD_SHIFT) -#define S2_PMD_MASK (~(S2_PMD_SIZE-1)) - -#define stage2_pud_none(kvm, pud) (0) -#define stage2_pud_present(kvm, pud) (1) -#define stage2_pud_clear(kvm, pud) do { } while (0) -#define stage2_pud_populate(kvm, pud, pmd) do { } while (0) -#define stage2_pmd_offset(kvm, pud, address) ((pmd_t *)(pud)) - -#define stage2_pmd_free(kvm, pmd) do { } while (0) - -#define stage2_pmd_addr_end(kvm, addr, end) (end) - -#define stage2_pud_huge(kvm, pud) (0) -#define stage2_pmd_table_empty(kvm, pmdp) (0) - -#endif diff --git a/arch/arm64/include/asm/stage2_pgtable-nopud.h b/arch/arm64/include/asm/stage2_pgtable-nopud.h deleted file mode 100644 index cd6304e203be..000000000000 --- a/arch/arm64/include/asm/stage2_pgtable-nopud.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (C) 2016 - ARM Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef __ARM64_S2_PGTABLE_NOPUD_H_ -#define __ARM64_S2_PGTABLE_NOPUD_H_ - -#define __S2_PGTABLE_PUD_FOLDED - -#define S2_PUD_SHIFT S2_PGDIR_SHIFT -#define S2_PTRS_PER_PUD 1 -#define S2_PUD_SIZE (_AC(1, UL) << S2_PUD_SHIFT) -#define S2_PUD_MASK (~(S2_PUD_SIZE-1)) - -#define stage2_pgd_none(kvm, pgd) (0) -#define stage2_pgd_present(kvm, pgd) (1) -#define stage2_pgd_clear(kvm, pgd) do { } while (0) -#define stage2_pgd_populate(kvm, pgd, pud) do { } while (0) - -#define stage2_pud_offset(kvm, pgd, address) ((pud_t *)(pgd)) - -#define stage2_pud_free(kvm, x) do { } while (0) - -#define stage2_pud_addr_end(kvm, addr, end) (end) -#define stage2_pud_table_empty(kvm, pmdp) (0) - -#endif diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h index 11891612be14..384f9e982cba 100644 --- a/arch/arm64/include/asm/stage2_pgtable.h +++ b/arch/arm64/include/asm/stage2_pgtable.h @@ -19,6 +19,7 @@ #ifndef __ARM64_S2_PGTABLE_H_ #define __ARM64_S2_PGTABLE_H_ +#include #include /* @@ -71,71 +72,163 @@ */ #define kvm_mmu_cache_min_pages(kvm) (STAGE2_PGTABLE_LEVELS - 1) - -#if STAGE2_PGTABLE_LEVELS > 3 - +/* Stage2 PUD definitions when the level is present */ +#define STAGE2_PGTABLE_HAS_PUD (STAGE2_PGTABLE_LEVELS > 3) #define S2_PUD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(1) #define S2_PUD_SIZE (1UL << S2_PUD_SHIFT) #define S2_PUD_MASK (~(S2_PUD_SIZE - 1)) -#define stage2_pgd_none(kvm, pgd) pgd_none(pgd) -#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd) -#define stage2_pgd_present(kvm, pgd) pgd_present(pgd) -#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud) -#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address) -#define stage2_pud_free(kvm, pud) pud_free(NULL, pud) +static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd) +{ + if (STAGE2_PGTABLE_HAS_PUD) + return pgd_none(pgd); + else + return 0; +} -#define stage2_pud_table_empty(kvm, pudp) kvm_page_empty(pudp) 
+static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp) +{ + if (STAGE2_PGTABLE_HAS_PUD) + pgd_clear(pgdp); +} + +static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd) +{ + if (STAGE2_PGTABLE_HAS_PUD) + return pgd_present(pgd); + else + return 1; +} + +static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud) +{ + if (STAGE2_PGTABLE_HAS_PUD) + pgd_populate(NULL, pgd, pud); +} + +static inline pud_t *stage2_pud_offset(struct kvm *kvm, + pgd_t *pgd, unsigned long address) +{ + if (STAGE2_PGTABLE_HAS_PUD) + return pud_offset(pgd, address); + else + return (pud_t *)pgd; +} + +static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud) +{ + if (STAGE2_PGTABLE_HAS_PUD) + pud_free(NULL, pud); +} + +static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp) +{ + if (STAGE2_PGTABLE_HAS_PUD) + return kvm_page_empty(pudp); + else + return false; +} static inline phys_addr_t stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { - phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK; + if (STAGE2_PGTABLE_HAS_PUD) { + phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK; - return (boundary - 1 < end - 1) ? boundary : end; + return (boundary - 1 < end - 1) ? boundary : end; + } else { + return end; + } } -#endif /* STAGE2_PGTABLE_LEVELS > 3 */ - - -#if STAGE2_PGTABLE_LEVELS > 2 - +/* Stage2 PMD definitions when the level is present */ +#define STAGE2_PGTABLE_HAS_PMD (STAGE2_PGTABLE_LEVELS > 2) #define S2_PMD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(2) #define S2_PMD_SIZE (1UL << S2_PMD_SHIFT) #define S2_PMD_MASK (~(S2_PMD_SIZE - 1)) -#define stage2_pud_none(kvm, pud) pud_none(pud) -#define stage2_pud_clear(kvm, pud) pud_clear(pud) -#define stage2_pud_present(kvm, pud) pud_present(pud) -#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd) -#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address) -#define stage2_pmd_free(kvm, pmd) pmd_free(NULL, pmd) +static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud) +{ + if (STAGE2_PGTABLE_HAS_PMD) + return pud_none(pud); + else + return 0; +} -#define stage2_pud_huge(kvm, pud) pud_huge(pud) -#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) +static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud) +{ + if (STAGE2_PGTABLE_HAS_PMD) + pud_clear(pud); +} + +static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud) +{ + if (STAGE2_PGTABLE_HAS_PMD) + return pud_present(pud); + else + return 1; +} + +static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd) +{ + if (STAGE2_PGTABLE_HAS_PMD) + pud_populate(NULL, pud, pmd); +} + +static inline pmd_t *stage2_pmd_offset(struct kvm *kvm, + pud_t *pud, unsigned long address) +{ + if (STAGE2_PGTABLE_HAS_PMD) + return pmd_offset(pud, address); + else + return (pmd_t *)pud; +} + +static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd) +{ + if (STAGE2_PGTABLE_HAS_PMD) + pmd_free(NULL, pmd); +} + +static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud) +{ + if (STAGE2_PGTABLE_HAS_PMD) + return pud_huge(pud); + else + return 0; +} + +static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp) +{ + if (STAGE2_PGTABLE_HAS_PMD) + return kvm_page_empty(pmdp); + else + return 0; +} static inline phys_addr_t stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { - phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK; + if (STAGE2_PGTABLE_HAS_PMD) { + phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK; - return 
(boundary - 1 < end - 1) ? boundary : end; + return (boundary - 1 < end - 1) ? boundary : end; + } else { + return end; + } } -#endif /* STAGE2_PGTABLE_LEVELS > 2 */ - -#define stage2_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) - -#if STAGE2_PGTABLE_LEVELS == 2 -#include -#elif STAGE2_PGTABLE_LEVELS == 3 -#include -#endif +static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep) +{ + return kvm_page_empty(ptep); +} #define stage2_pgd_size(kvm) (PTRS_PER_S2_PGD * sizeof(pgd_t)) -#define stage2_pgd_index(kvm, addr) \ - (((addr) >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)) +static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr) +{ + return (((addr) >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)); +} static inline phys_addr_t stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) From 61fa5a867b6521b4c55865b88bf70c93078d0cf8 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:46 +0100 Subject: [PATCH 038/204] kvm: arm64: Make stage2 page table layout dynamic Switch to dynamic stage2 page table layout based on the given VM. So far we had a common stage2 table layout determined at compile time. Make decision based on the VM instance depending on the IPA limit for the VM. Adds helpers to compute the stage2 parameters based on the guest's IPA and uses them to make the decisions. The IPA limit is still fixed to 40bits and the build time check to ensure the stage2 doesn't exceed the host kernels page table levels is retained. Also make sure that we use the pud/pmd level helpers from the host only when they are not folded. Cc: Christoffer Dall Cc: Marc Zyngier Reviewed-by: Eric Auger Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/stage2_pgtable.h | 84 +++++++++++++++---------- 1 file changed, 52 insertions(+), 32 deletions(-) diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h index 384f9e982cba..36a0a1165003 100644 --- a/arch/arm64/include/asm/stage2_pgtable.h +++ b/arch/arm64/include/asm/stage2_pgtable.h @@ -22,6 +22,13 @@ #include #include +/* + * PGDIR_SHIFT determines the size a top-level page table entry can map + * and depends on the number of levels in the page table. Compute the + * PGDIR_SHIFT for a given number of levels. + */ +#define pt_levels_pgdir_shift(lvls) ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls)) + /* * The hardware supports concatenation of up to 16 tables at stage2 entry level * and we use the feature whenever possible. @@ -30,11 +37,13 @@ * On arm64, the smallest PAGE_SIZE supported is 4k, which means * (PAGE_SHIFT - 3) > 4 holds for all page sizes. * This implies, the total number of page table levels at stage2 expected - * by the hardware is actually the number of levels required for (KVM_PHYS_SHIFT - 4) + * by the hardware is actually the number of levels required for (IPA_SHIFT - 4) * in normal translations(e.g, stage1), since we cannot have another level in - * the range (KVM_PHYS_SHIFT, KVM_PHYS_SHIFT - 4). + * the range (IPA_SHIFT, IPA_SHIFT - 4). */ -#define STAGE2_PGTABLE_LEVELS ARM64_HW_PGTABLE_LEVELS(KVM_PHYS_SHIFT - 4) +#define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4) +#define STAGE2_PGTABLE_LEVELS stage2_pgtable_levels(KVM_PHYS_SHIFT) +#define kvm_stage2_levels(kvm) stage2_pgtable_levels(kvm_phys_shift(kvm)) /* * With all the supported VA_BITs and 40bit guest IPA, the following condition @@ -54,33 +63,42 @@ #error "Unsupported combination of guest IPA and host VA_BITS." 
#endif -/* S2_PGDIR_SHIFT is the size mapped by top-level stage2 entry */ -#define S2_PGDIR_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - STAGE2_PGTABLE_LEVELS) -#define S2_PGDIR_SIZE (1UL << S2_PGDIR_SHIFT) -#define S2_PGDIR_MASK (~(S2_PGDIR_SIZE - 1)) + +/* stage2_pgdir_shift() is the size mapped by top-level stage2 entry for the VM */ +#define stage2_pgdir_shift(kvm) pt_levels_pgdir_shift(kvm_stage2_levels(kvm)) +#define stage2_pgdir_size(kvm) (1ULL << stage2_pgdir_shift(kvm)) +#define stage2_pgdir_mask(kvm) ~(stage2_pgdir_size(kvm) - 1) /* * The number of PTRS across all concatenated stage2 tables given by the * number of bits resolved at the initial level. */ -#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - S2_PGDIR_SHIFT)) +#define __s2_pgd_ptrs(ipa, lvls) (1 << ((ipa) - pt_levels_pgdir_shift((lvls)))) +#define __s2_pgd_size(ipa, lvls) (__s2_pgd_ptrs((ipa), (lvls)) * sizeof(pgd_t)) + +#define stage2_pgd_ptrs(kvm) __s2_pgd_ptrs(kvm_phys_shift(kvm), kvm_stage2_levels(kvm)) +#define stage2_pgd_size(kvm) __s2_pgd_size(kvm_phys_shift(kvm), kvm_stage2_levels(kvm)) /* * kvm_mmmu_cache_min_pages() is the number of pages required to install * a stage-2 translation. We pre-allocate the entry level page table at * the VM creation. */ -#define kvm_mmu_cache_min_pages(kvm) (STAGE2_PGTABLE_LEVELS - 1) +#define kvm_mmu_cache_min_pages(kvm) (kvm_stage2_levels(kvm) - 1) /* Stage2 PUD definitions when the level is present */ -#define STAGE2_PGTABLE_HAS_PUD (STAGE2_PGTABLE_LEVELS > 3) +static inline bool kvm_stage2_has_pud(struct kvm *kvm) +{ + return (CONFIG_PGTABLE_LEVELS > 3) && (kvm_stage2_levels(kvm) > 3); +} + #define S2_PUD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(1) #define S2_PUD_SIZE (1UL << S2_PUD_SHIFT) #define S2_PUD_MASK (~(S2_PUD_SIZE - 1)) static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd) { - if (STAGE2_PGTABLE_HAS_PUD) + if (kvm_stage2_has_pud(kvm)) return pgd_none(pgd); else return 0; @@ -88,13 +106,13 @@ static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd) static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp) { - if (STAGE2_PGTABLE_HAS_PUD) + if (kvm_stage2_has_pud(kvm)) pgd_clear(pgdp); } static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd) { - if (STAGE2_PGTABLE_HAS_PUD) + if (kvm_stage2_has_pud(kvm)) return pgd_present(pgd); else return 1; @@ -102,14 +120,14 @@ static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd) static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud) { - if (STAGE2_PGTABLE_HAS_PUD) + if (kvm_stage2_has_pud(kvm)) pgd_populate(NULL, pgd, pud); } static inline pud_t *stage2_pud_offset(struct kvm *kvm, pgd_t *pgd, unsigned long address) { - if (STAGE2_PGTABLE_HAS_PUD) + if (kvm_stage2_has_pud(kvm)) return pud_offset(pgd, address); else return (pud_t *)pgd; @@ -117,13 +135,13 @@ static inline pud_t *stage2_pud_offset(struct kvm *kvm, static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud) { - if (STAGE2_PGTABLE_HAS_PUD) + if (kvm_stage2_has_pud(kvm)) pud_free(NULL, pud); } static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp) { - if (STAGE2_PGTABLE_HAS_PUD) + if (kvm_stage2_has_pud(kvm)) return kvm_page_empty(pudp); else return false; @@ -132,7 +150,7 @@ static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp) static inline phys_addr_t stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { - if (STAGE2_PGTABLE_HAS_PUD) { + if (kvm_stage2_has_pud(kvm)) { phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK; return (boundary - 1 < end 
- 1) ? boundary : end; @@ -142,14 +160,18 @@ stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) } /* Stage2 PMD definitions when the level is present */ -#define STAGE2_PGTABLE_HAS_PMD (STAGE2_PGTABLE_LEVELS > 2) +static inline bool kvm_stage2_has_pmd(struct kvm *kvm) +{ + return (CONFIG_PGTABLE_LEVELS > 2) && (kvm_stage2_levels(kvm) > 2); +} + #define S2_PMD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(2) #define S2_PMD_SIZE (1UL << S2_PMD_SHIFT) #define S2_PMD_MASK (~(S2_PMD_SIZE - 1)) static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud) { - if (STAGE2_PGTABLE_HAS_PMD) + if (kvm_stage2_has_pmd(kvm)) return pud_none(pud); else return 0; @@ -157,13 +179,13 @@ static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud) static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud) { - if (STAGE2_PGTABLE_HAS_PMD) + if (kvm_stage2_has_pmd(kvm)) pud_clear(pud); } static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud) { - if (STAGE2_PGTABLE_HAS_PMD) + if (kvm_stage2_has_pmd(kvm)) return pud_present(pud); else return 1; @@ -171,14 +193,14 @@ static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud) static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd) { - if (STAGE2_PGTABLE_HAS_PMD) + if (kvm_stage2_has_pmd(kvm)) pud_populate(NULL, pud, pmd); } static inline pmd_t *stage2_pmd_offset(struct kvm *kvm, pud_t *pud, unsigned long address) { - if (STAGE2_PGTABLE_HAS_PMD) + if (kvm_stage2_has_pmd(kvm)) return pmd_offset(pud, address); else return (pmd_t *)pud; @@ -186,13 +208,13 @@ static inline pmd_t *stage2_pmd_offset(struct kvm *kvm, static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd) { - if (STAGE2_PGTABLE_HAS_PMD) + if (kvm_stage2_has_pmd(kvm)) pmd_free(NULL, pmd); } static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud) { - if (STAGE2_PGTABLE_HAS_PMD) + if (kvm_stage2_has_pmd(kvm)) return pud_huge(pud); else return 0; @@ -200,7 +222,7 @@ static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud) static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp) { - if (STAGE2_PGTABLE_HAS_PMD) + if (kvm_stage2_has_pmd(kvm)) return kvm_page_empty(pmdp); else return 0; @@ -209,7 +231,7 @@ static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp) static inline phys_addr_t stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { - if (STAGE2_PGTABLE_HAS_PMD) { + if (kvm_stage2_has_pmd(kvm)) { phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK; return (boundary - 1 < end - 1) ? boundary : end; @@ -223,17 +245,15 @@ static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep) return kvm_page_empty(ptep); } -#define stage2_pgd_size(kvm) (PTRS_PER_S2_PGD * sizeof(pgd_t)) - static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr) { - return (((addr) >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)); + return (((addr) >> stage2_pgdir_shift(kvm)) & (stage2_pgd_ptrs(kvm) - 1)); } static inline phys_addr_t stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { - phys_addr_t boundary = (addr + S2_PGDIR_SIZE) & S2_PGDIR_MASK; + phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm); return (boundary - 1 < end - 1) ? 
boundary : end; } From 595583306434caafac4a71102dca5a8d32d1a769 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:47 +0100 Subject: [PATCH 039/204] kvm: arm64: Dynamic configuration of VTTBR mask On arm64 VTTBR_EL2:BADDR holds the base address for the stage2 translation table. The Arm ARM mandates that the bits BADDR[x-1:0] should be 0, where 'x' is defined for a given IPA Size and the number of levels for a translation granule size. It is defined using some magical constants. This patch is a reverse engineered implementation to calculate the 'x' at runtime for a given ipa and number of page table levels. See patch for more details. Cc: Marc Zyngier Cc: Christoffer Dall Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_arm.h | 73 ++++++++++++++++++++++++++++---- arch/arm64/include/asm/kvm_mmu.h | 25 ++++++++++- 2 files changed, 88 insertions(+), 10 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 14317b3a1820..b236d90ca056 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -123,7 +123,6 @@ #define VTCR_EL2_SL0_MASK (3 << VTCR_EL2_SL0_SHIFT) #define VTCR_EL2_SL0_LVL1 (1 << VTCR_EL2_SL0_SHIFT) #define VTCR_EL2_T0SZ_MASK 0x3f -#define VTCR_EL2_T0SZ_40B 24 #define VTCR_EL2_VS_SHIFT 19 #define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT) #define VTCR_EL2_VS_16BIT (1 << VTCR_EL2_VS_SHIFT) @@ -140,11 +139,8 @@ * Note that when using 4K pages, we concatenate two first level page tables * together. With 16K pages, we concatenate 16 first level page tables. * - * The magic numbers used for VTTBR_X in this patch can be found in Tables - * D4-23 and D4-25 in ARM DDI 0487A.b. */ -#define VTCR_EL2_T0SZ_IPA VTCR_EL2_T0SZ_40B #define VTCR_EL2_COMMON_BITS (VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \ VTCR_EL2_IRGN0_WBWA | VTCR_EL2_RES1) @@ -155,7 +151,6 @@ * 2 level page tables (SL = 1) */ #define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SL0_LVL1) -#define VTTBR_X_TGRAN_MAGIC 38 #elif defined(CONFIG_ARM64_16K_PAGES) /* * Stage2 translation configuration: @@ -163,7 +158,6 @@ * 2 level page tables (SL = 1) */ #define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_16K | VTCR_EL2_SL0_LVL1) -#define VTTBR_X_TGRAN_MAGIC 42 #else /* 4K */ /* * Stage2 translation configuration: @@ -171,13 +165,74 @@ * 3 level page tables (SL = 1) */ #define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SL0_LVL1) -#define VTTBR_X_TGRAN_MAGIC 37 #endif #define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS) -#define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA) +/* + * ARM VMSAv8-64 defines an algorithm for finding the translation table + * descriptors in section D4.2.8 in ARM DDI 0487C.a. + * + * The algorithm defines the expectations on the translation table + * addresses for each level, based on PAGE_SIZE, entry level + * and the translation table size (T0SZ). The variable "x" in the + * algorithm determines the alignment of a table base address at a given + * level and thus determines the alignment of VTTBR:BADDR for stage2 + * page table entry level. + * Since the number of bits resolved at the entry level could vary + * depending on the T0SZ, the value of "x" is defined based on a + * Magic constant for a given PAGE_SIZE and Entry Level. The + * intermediate levels must be always aligned to the PAGE_SIZE (i.e, + * x = PAGE_SHIFT). 
+ * + * The value of "x" for entry level is calculated as : + * x = Magic_N - T0SZ + * + * where Magic_N is an integer depending on the page size and the entry + * level of the page table as below: + * + * -------------------------------------------- + * | Entry level | 4K 16K 64K | + * -------------------------------------------- + * | Level: 0 (4 levels) | 28 | - | - | + * -------------------------------------------- + * | Level: 1 (3 levels) | 37 | 31 | 25 | + * -------------------------------------------- + * | Level: 2 (2 levels) | 46 | 42 | 38 | + * -------------------------------------------- + * | Level: 3 (1 level) | - | 53 | 51 | + * -------------------------------------------- + * + * We have a magic formula for the Magic_N below: + * + * Magic_N(PAGE_SIZE, Level) = 64 - ((PAGE_SHIFT - 3) * Number_of_levels) + * + * where Number_of_levels = (4 - Level). We are only interested in the + * value for Entry_Level for the stage2 page table. + * + * So, given that T0SZ = (64 - IPA_SHIFT), we can compute 'x' as follows: + * + * x = (64 - ((PAGE_SHIFT - 3) * Number_of_levels)) - (64 - IPA_SHIFT) + * = IPA_SHIFT - ((PAGE_SHIFT - 3) * Number of levels) + * + * Here is one way to explain the Magic Formula: + * + * x = log2(Size_of_Entry_Level_Table) + * + * Since, we can resolve (PAGE_SHIFT - 3) bits at each level, and another + * PAGE_SHIFT bits in the PTE, we have : + * + * Bits_Entry_level = IPA_SHIFT - ((PAGE_SHIFT - 3) * (n - 1) + PAGE_SHIFT) + * = IPA_SHIFT - (PAGE_SHIFT - 3) * n - 3 + * where n = number of levels, and since each pointer is 8bytes, we have: + * + * x = Bits_Entry_Level + 3 + * = IPA_SHIFT - (PAGE_SHIFT - 3) * n + * + * The only constraint here is that, we have to find the number of page table + * levels for a given IPA size (which we do, see stage2_pt_levels()) + */ +#define ARM64_VTTBR_X(ipa, levels) ((ipa) - ((levels) * (PAGE_SHIFT - 3))) -#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X) #define VTTBR_VMID_SHIFT (UL(48)) #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 7342d2c51773..ac3ca9690bad 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -145,7 +145,6 @@ static inline unsigned long __kern_hyp_va(unsigned long v) #define kvm_phys_shift(kvm) KVM_PHYS_SHIFT #define kvm_phys_size(kvm) (_AC(1, ULL) << kvm_phys_shift(kvm)) #define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - _AC(1, ULL)) -#define kvm_vttbr_baddr_mask(kvm) VTTBR_BADDR_MASK static inline bool kvm_page_empty(void *ptr) { @@ -520,5 +519,29 @@ static inline int hyp_map_aux_data(void) #define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr) +/* + * Get the magic number 'x' for VTTBR:BADDR of this KVM instance. + * With v8.2 LVA extensions, 'x' should be a minimum of 6 with + * 52bit IPS. + */ +static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels) +{ + int x = ARM64_VTTBR_X(ipa_shift, levels); + + return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 
6 : x; +} + +static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels) +{ + unsigned int x = arm64_vttbr_x(ipa_shift, levels); + + return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x); +} + +static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm) +{ + return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm)); +} + #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ From 7e8130456e067f49693fdcc25b5ba242a9c5568b Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:48 +0100 Subject: [PATCH 040/204] kvm: arm64: Configure VTCR_EL2.SL0 per VM VTCR_EL2 holds the following key stage2 translation table parameters: SL0 - Entry level in the page table lookup. T0SZ - Denotes the size of the memory addressed by the table. We have been using fixed values for the SL0 depending on the page size as we have a fixed IPA size. But since we are about to make it dynamic, we need to calculate the SL0 at runtime per VM. This patch adds a helper to compute the value of SL0 for a VM based on the IPA size. Cc: Marc Zyngier Cc: Christoffer Dall Reviewed-by: Eric Auger Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_arm.h | 69 +++++++++++++++++++++++--------- arch/arm64/kvm/reset.c | 1 + 2 files changed, 50 insertions(+), 20 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index b236d90ca056..f913adb44f93 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -121,7 +121,6 @@ #define VTCR_EL2_IRGN0_WBWA TCR_IRGN0_WBWA #define VTCR_EL2_SL0_SHIFT 6 #define VTCR_EL2_SL0_MASK (3 << VTCR_EL2_SL0_SHIFT) -#define VTCR_EL2_SL0_LVL1 (1 << VTCR_EL2_SL0_SHIFT) #define VTCR_EL2_T0SZ_MASK 0x3f #define VTCR_EL2_VS_SHIFT 19 #define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT) @@ -144,30 +143,60 @@ #define VTCR_EL2_COMMON_BITS (VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \ VTCR_EL2_IRGN0_WBWA | VTCR_EL2_RES1) +/* + * VTCR_EL2:SL0 indicates the entry level for Stage2 translation. + * Interestingly, it depends on the page size. + * See D.10.2.121, VTCR_EL2, in ARM DDI 0487C.a + * + * ----------------------------------------- + * | Entry level | 4K | 16K/64K | + * ------------------------------------------ + * | Level: 0 | 2 | - | + * ------------------------------------------ + * | Level: 1 | 1 | 2 | + * ------------------------------------------ + * | Level: 2 | 0 | 1 | + * ------------------------------------------ + * | Level: 3 | - | 0 | + * ------------------------------------------ + * + * The table roughly translates to : + * + * SL0(PAGE_SIZE, Entry_level) = TGRAN_SL0_BASE - Entry_Level + * + * Where TGRAN_SL0_BASE is a magic number depending on the page size: + * TGRAN_SL0_BASE(4K) = 2 + * TGRAN_SL0_BASE(16K) = 3 + * TGRAN_SL0_BASE(64K) = 3 + * provided we take care of ruling out the unsupported cases and + * Entry_Level = 4 - Number_of_levels. 
+ * + */ #ifdef CONFIG_ARM64_64K_PAGES -/* - * Stage2 translation configuration: - * 64kB pages (TG0 = 1) - * 2 level page tables (SL = 1) - */ -#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SL0_LVL1) + +#define VTCR_EL2_TGRAN VTCR_EL2_TG0_64K +#define VTCR_EL2_TGRAN_SL0_BASE 3UL + #elif defined(CONFIG_ARM64_16K_PAGES) -/* - * Stage2 translation configuration: - * 16kB pages (TG0 = 2) - * 2 level page tables (SL = 1) - */ -#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_16K | VTCR_EL2_SL0_LVL1) + +#define VTCR_EL2_TGRAN VTCR_EL2_TG0_16K +#define VTCR_EL2_TGRAN_SL0_BASE 3UL + #else /* 4K */ -/* - * Stage2 translation configuration: - * 4kB pages (TG0 = 0) - * 3 level page tables (SL = 1) - */ -#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SL0_LVL1) + +#define VTCR_EL2_TGRAN VTCR_EL2_TG0_4K +#define VTCR_EL2_TGRAN_SL0_BASE 2UL + #endif -#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS) +#define VTCR_EL2_LVLS_TO_SL0(levels) \ + ((VTCR_EL2_TGRAN_SL0_BASE - (4 - (levels))) << VTCR_EL2_SL0_SHIFT) +#define VTCR_EL2_SL0_TO_LVLS(sl0) \ + ((sl0) + 4 - VTCR_EL2_TGRAN_SL0_BASE) +#define VTCR_EL2_LVLS(vtcr) \ + VTCR_EL2_SL0_TO_LVLS(((vtcr) & VTCR_EL2_SL0_MASK) >> VTCR_EL2_SL0_SHIFT) + +#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN) /* * ARM VMSAv8-64 defines an algorithm for finding the translation table * descriptors in section D4.2.8 in ARM DDI 0487C.a. diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 616120c4176b..1ced1e37374e 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -160,6 +160,7 @@ int kvm_arm_config_vm(struct kvm *kvm, unsigned long type) if (phys_shift > KVM_PHYS_SHIFT) phys_shift = KVM_PHYS_SHIFT; vtcr |= VTCR_EL2_T0SZ(phys_shift); + vtcr |= VTCR_EL2_LVLS_TO_SL0(kvm_stage2_levels(kvm)); /* * Enable the Hardware Access Flag management, unconditionally From 13ac4bbcc457d3925b4031cc70e3031fd8b9c3b7 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:49 +0100 Subject: [PATCH 041/204] kvm: arm64: Switch to per VM IPA limit Now that we can manage the stage2 page table per VM, switch the configuration details to per VM instance. The VTCR is updated with the values specific to the VM based on the configuration. We store the IPA size and the number of stage2 page table levels for the guest already in VTCR. Decode it back from the vtcr field wherever we need it. Cc: Marc Zyngier Cc: Christoffer Dall Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_arm.h | 2 ++ arch/arm64/include/asm/kvm_mmu.h | 2 +- arch/arm64/include/asm/stage2_pgtable.h | 2 +- arch/arm64/kvm/reset.c | 2 +- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index f913adb44f93..e4240568cc18 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -197,6 +197,8 @@ VTCR_EL2_SL0_TO_LVLS(((vtcr) & VTCR_EL2_SL0_MASK) >> VTCR_EL2_SL0_SHIFT) #define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN) +#define VTCR_EL2_IPA(vtcr) (64 - ((vtcr) & VTCR_EL2_T0SZ_MASK)) + /* * ARM VMSAv8-64 defines an algorithm for finding the translation table * descriptors in section D4.2.8 in ARM DDI 0487C.a. 
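As a rough cross check of the encodings above (a standalone sketch only, assuming 4K pages and the
default 40bit IPA; the macros here are local stand-ins mirroring VTCR_EL2_LVLS_TO_SL0(),
VTCR_EL2_SL0_TO_LVLS(), VTCR_EL2_IPA() and ARM64_VTTBR_X(), not kernel code), the stage2
parameters round-trip and reproduce the old fixed values:

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SHIFT		12	/* 4K granule */
	#define TGRAN_SL0_BASE		2	/* VTCR_EL2_TGRAN_SL0_BASE for 4K */

	#define LVLS_TO_SL0(lvls)	(TGRAN_SL0_BASE - (4 - (lvls)))
	#define SL0_TO_LVLS(sl0)	((sl0) + 4 - TGRAN_SL0_BASE)
	#define VTTBR_X(ipa, lvls)	((ipa) - (lvls) * (PAGE_SHIFT - 3))

	int main(void)
	{
		unsigned int ipa = 40, levels = 3;	/* stage2_pgtable_levels(40) with 4K pages */
		unsigned int t0sz = 64 - ipa;		/* 24, the old VTCR_EL2_T0SZ_40B */
		unsigned int sl0 = LVLS_TO_SL0(levels);	/* entry level 1 -> SL0 = 1 */

		assert(SL0_TO_LVLS(sl0) == levels);	/* what VTCR_EL2_LVLS() decodes back */
		assert(64 - t0sz == ipa);		/* what VTCR_EL2_IPA() decodes back */
		assert(VTTBR_X(ipa, levels) == 37 - 24);/* old VTTBR_X magic for 4K, 40bit IPA */
		printf("IPA=%u levels=%u T0SZ=%u SL0=%u x=%u\n",
		       ipa, levels, t0sz, sl0, VTTBR_X(ipa, levels));
		return 0;
	}

For 16K and 64K granules only PAGE_SHIFT and the SL0 base (3) change, and the same arithmetic
reproduces the removed magic numbers 42 and 38 for the 40bit IPA case.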
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index ac3ca9690bad..77b1af9e64db 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -142,7 +142,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v) */ #define KVM_PHYS_SHIFT (40) -#define kvm_phys_shift(kvm) KVM_PHYS_SHIFT +#define kvm_phys_shift(kvm) VTCR_EL2_IPA(kvm->arch.vtcr) #define kvm_phys_size(kvm) (_AC(1, ULL) << kvm_phys_shift(kvm)) #define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - _AC(1, ULL)) diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h index 36a0a1165003..c62fe118a898 100644 --- a/arch/arm64/include/asm/stage2_pgtable.h +++ b/arch/arm64/include/asm/stage2_pgtable.h @@ -43,7 +43,7 @@ */ #define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4) #define STAGE2_PGTABLE_LEVELS stage2_pgtable_levels(KVM_PHYS_SHIFT) -#define kvm_stage2_levels(kvm) stage2_pgtable_levels(kvm_phys_shift(kvm)) +#define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr) /* * With all the supported VA_BITs and 40bit guest IPA, the following condition diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 1ced1e37374e..2bf41e007390 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -160,7 +160,7 @@ int kvm_arm_config_vm(struct kvm *kvm, unsigned long type) if (phys_shift > KVM_PHYS_SHIFT) phys_shift = KVM_PHYS_SHIFT; vtcr |= VTCR_EL2_T0SZ(phys_shift); - vtcr |= VTCR_EL2_LVLS_TO_SL0(kvm_stage2_levels(kvm)); + vtcr |= VTCR_EL2_LVLS_TO_SL0(stage2_pgtable_levels(phys_shift)); /* * Enable the Hardware Access Flag management, unconditionally From 8ad50c8985d805923f52a80698010a0a5123c07d Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Wed, 26 Sep 2018 17:32:50 +0100 Subject: [PATCH 042/204] vgic: Add support for 52bit guest physical address Add support for handling 52bit guest physical address to the VGIC layer. So far we have limited the guest physical address to 48bits, by explicitly masking the upper bits. This patch removes the restriction. We do not have to check if the host supports 52bit as the gpa is always validated during an access. (e.g, kvm_{read/write}_guest, kvm_is_visible_gfn()). Also, the ITS table save-restore is also not affected with the enhancement. The DTE entries already store the bits[51:8] of the ITT_addr (with a 256byte alignment). 
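As a sketch of the bit packing involved (standalone and illustrative only; GENMASK_ULL and the two
helpers below are local reimplementations of GITS_BASER_PHYS_52_to_48() and the new
GITS_BASER_ADDR_48_to_52()), register bits [15:12] carry physical address bits [51:48] while bits
[47:16] are stored in place, so the conversion round-trips for any 64K aligned table address:

	#include <assert.h>
	#include <stdint.h>

	#define GENMASK_ULL(h, l) \
		(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

	static uint64_t phys_52_to_48(uint64_t phys)	/* as GITS_BASER_PHYS_52_to_48 */
	{
		return (phys & GENMASK_ULL(47, 16)) | (((phys >> 48) & 0xf) << 12);
	}

	static uint64_t addr_48_to_52(uint64_t baser)	/* as GITS_BASER_ADDR_48_to_52 */
	{
		return (baser & GENMASK_ULL(47, 16)) | (((baser >> 12) & 0xf) << 48);
	}

	int main(void)
	{
		uint64_t pa = 0x000a123456780000ULL;	/* 52bit PA, 64K aligned */

		assert(addr_48_to_52(phys_52_to_48(pa)) == pa);
		return 0;
	}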
Cc: Marc Zyngier Cc: Christoffer Dall Reviewed-by: Eric Auger Signed-off-by: Kristina Martsenko [ Macro clean ups, fix PROPBASER and PENDBASER accesses ] Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- include/linux/irqchip/arm-gic-v3.h | 5 +++++ virt/kvm/arm/vgic/vgic-its.c | 36 +++++++++--------------------- virt/kvm/arm/vgic/vgic-mmio-v3.c | 2 -- 3 files changed, 15 insertions(+), 28 deletions(-) diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 8bdbb5f29494..74b0aa9c7499 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -357,6 +357,8 @@ #define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) #define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) +#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12)) + #define GITS_BASER_NR_REGS 8 #define GITS_BASER_VALID (1ULL << 63) @@ -388,6 +390,9 @@ #define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) #define GITS_BASER_PHYS_52_to_48(phys) \ (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) +#define GITS_BASER_ADDR_48_to_52(baser) \ + (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48) + #define GITS_BASER_SHAREABILITY_SHIFT (10) #define GITS_BASER_InnerShareable \ GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 12502251727e..eb2a390a6c86 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -241,13 +241,6 @@ static struct its_ite *find_ite(struct vgic_its *its, u32 device_id, list_for_each_entry(dev, &(its)->device_list, dev_list) \ list_for_each_entry(ite, &(dev)->itt_head, ite_list) -/* - * We only implement 48 bits of PA at the moment, although the ITS - * supports more. Let's be restrictive here. - */ -#define BASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 16)) -#define CBASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 12)) - #define GIC_LPI_OFFSET 8192 #define VITS_TYPER_IDBITS 16 @@ -759,6 +752,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, { int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K; u64 indirect_ptr, type = GITS_BASER_TYPE(baser); + phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser); int esz = GITS_BASER_ENTRY_SIZE(baser); int index; gfn_t gfn; @@ -783,7 +777,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, if (id >= (l1_tbl_size / esz)) return false; - addr = BASER_ADDRESS(baser) + id * esz; + addr = base + id * esz; gfn = addr >> PAGE_SHIFT; if (eaddr) @@ -798,7 +792,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, /* Each 1st level entry is represented by a 64-bit value. */ if (kvm_read_guest_lock(its->dev->kvm, - BASER_ADDRESS(baser) + index * sizeof(indirect_ptr), + base + index * sizeof(indirect_ptr), &indirect_ptr, sizeof(indirect_ptr))) return false; @@ -808,11 +802,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, if (!(indirect_ptr & BIT_ULL(63))) return false; - /* - * Mask the guest physical address and calculate the frame number. - * Any address beyond our supported 48 bits of PA will be caught - * by the actual check in the final step. - */ + /* Mask the guest physical address and calculate the frame number. 
*/ indirect_ptr &= GENMASK_ULL(51, 16); /* Find the address of the actual entry */ @@ -1304,9 +1294,6 @@ static u64 vgic_sanitise_its_baser(u64 reg) GITS_BASER_OUTER_CACHEABILITY_SHIFT, vgic_sanitise_outer_cacheability); - /* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */ - reg &= ~GENMASK_ULL(15, 12); - /* We support only one (ITS) page size: 64K */ reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K; @@ -1325,11 +1312,8 @@ static u64 vgic_sanitise_its_cbaser(u64 reg) GITS_CBASER_OUTER_CACHEABILITY_SHIFT, vgic_sanitise_outer_cacheability); - /* - * Sanitise the physical address to be 64k aligned. - * Also limit the physical addresses to 48 bits. - */ - reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12)); + /* Sanitise the physical address to be 64k aligned. */ + reg &= ~GENMASK_ULL(15, 12); return reg; } @@ -1375,7 +1359,7 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its) if (!its->enabled) return; - cbaser = CBASER_ADDRESS(its->cbaser); + cbaser = GITS_CBASER_ADDRESS(its->cbaser); while (its->cwriter != its->creadr) { int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr, @@ -2233,7 +2217,7 @@ static int vgic_its_restore_device_tables(struct vgic_its *its) if (!(baser & GITS_BASER_VALID)) return 0; - l1_gpa = BASER_ADDRESS(baser); + l1_gpa = GITS_BASER_ADDR_48_to_52(baser); if (baser & GITS_BASER_INDIRECT) { l1_esz = GITS_LVL1_ENTRY_SIZE; @@ -2305,7 +2289,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its) { const struct vgic_its_abi *abi = vgic_its_get_abi(its); u64 baser = its->baser_coll_table; - gpa_t gpa = BASER_ADDRESS(baser); + gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser); struct its_collection *collection; u64 val; size_t max_size, filled = 0; @@ -2354,7 +2338,7 @@ static int vgic_its_restore_collection_table(struct vgic_its *its) if (!(baser & GITS_BASER_VALID)) return 0; - gpa = BASER_ADDRESS(baser); + gpa = GITS_BASER_ADDR_48_to_52(baser); max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K; diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index a2a175b08b17..b3d1f0985117 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -364,7 +364,6 @@ static u64 vgic_sanitise_pendbaser(u64 reg) vgic_sanitise_outer_cacheability); reg &= ~PENDBASER_RES0_MASK; - reg &= ~GENMASK_ULL(51, 48); return reg; } @@ -382,7 +381,6 @@ static u64 vgic_sanitise_propbaser(u64 reg) vgic_sanitise_outer_cacheability); reg &= ~PROPBASER_RES0_MASK; - reg &= ~GENMASK_ULL(51, 48); return reg; } From bc1d7de8c550db2f8fd59458a07fefa863358b8d Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:51 +0100 Subject: [PATCH 043/204] kvm: arm64: Add 52bit support for PAR to HPFAR conversoin Add support for handling 52bit addresses in PAR to HPFAR conversion. Instead of hardcoding the address limits, we now use PHYS_MASK_SHIFT. 
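A standalone sketch of the conversion (illustrative only, with PHYS_MASK_SHIFT assumed to be 48
here, i.e. no 52bit PA): the PA field simply moves from bit 12 in the PAR to bit 4 in the HPFAR,
hence the mask followed by a net right shift of 8, which matches the old open coded
">> 12 ... << 4" sequence:

	#include <assert.h>
	#include <stdint.h>

	#define GENMASK_ULL(h, l) \
		(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

	#define PHYS_MASK_SHIFT	48	/* assumption: 48bit PA, as before this series */
	#define PAR_TO_HPFAR(par) \
		(((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8)

	int main(void)
	{
		uint64_t par = 0x0000823456789fffULL;	/* PA bits live in [47:12] */
		/* the old open coded conversion, limited to a 48bit PA */
		uint64_t old = ((par >> 12) & ((1ULL << 36) - 1)) << 4;

		assert(PAR_TO_HPFAR(par) == old);
		return 0;
	}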
Cc: Marc Zyngier Cc: Christoffer Dall Reviewed-by: Eric Auger Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_arm.h | 7 +++++++ arch/arm64/kvm/hyp/switch.c | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index e4240568cc18..f1330284720d 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -311,6 +311,13 @@ /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ #define HPFAR_MASK (~UL(0xf)) +/* + * We have + * PAR [PA_Shift - 1 : 12] = PA [PA_Shift - 1 : 12] + * HPFAR [PA_Shift - 9 : 4] = FIPA [PA_Shift - 1 : 12] + */ +#define PAR_TO_HPFAR(par) \ + (((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8) #define kvm_arm_exception_type \ {0, "IRQ" }, \ diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 9d5ce1a3039a..7cc175c88a37 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -263,7 +263,7 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) return false; /* Translation failed, back to guest */ /* Convert PAR to HPFAR format */ - *hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4; + *hpfar = PAR_TO_HPFAR(tmp); return true; } From e199ad2bf5cf7a6d15747a97da4ec1084bd6aae1 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 10 Sep 2018 18:29:08 +1000 Subject: [PATCH 044/204] KVM: PPC: Validate all tces before updating tables The KVM TCE handlers are written in a way so they fail when either something went horribly wrong or the userspace did some obvious mistake such as passing a misaligned address. We are going to enhance the TCE checker to fail on attempts to map bigger IOMMU page than the underlying pinned memory so let's valitate TCE beforehand. This should cause no behavioral change. Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_64_vio.c | 18 ++++++++++++++++++ arch/powerpc/kvm/book3s_64_vio_hv.c | 4 ++++ 2 files changed, 22 insertions(+) diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 9a3f2646ecc7..3c17977ffea6 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -599,6 +599,24 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, ret = kvmppc_tce_validate(stt, tce); if (ret != H_SUCCESS) goto unlock_exit; + } + + for (i = 0; i < npages; ++i) { + /* + * This looks unsafe, because we validate, then regrab + * the TCE from userspace which could have been changed by + * another thread. + * + * But it actually is safe, because the relevant checks will be + * re-executed in the following code. If userspace tries to + * change this dodgily it will result in a messier failure mode + * but won't threaten the host. 
+ */ + if (get_user(tce, tces + i)) { + ret = H_TOO_HARD; + goto unlock_exit; + } + tce = be64_to_cpu(tce); if (kvmppc_gpa_to_ua(vcpu->kvm, tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 6821ead4b4eb..c2848e0b1b71 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -524,6 +524,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, ret = kvmppc_tce_validate(stt, tce); if (ret != H_SUCCESS) goto unlock_exit; + } + + for (i = 0; i < npages; ++i) { + unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); ua = 0; if (kvmppc_gpa_to_ua(vcpu->kvm, From f7960e299f13f069d6f3d4e157d91bfca2669677 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 10 Sep 2018 18:29:09 +1000 Subject: [PATCH 045/204] KVM: PPC: Inform the userspace about TCE update failures We return H_TOO_HARD from TCE update handlers when we think that the next handler (realmode -> virtual mode -> user mode) has a chance to handle the request; H_HARDWARE/H_CLOSED otherwise. This changes the handlers to return H_TOO_HARD on every error giving the userspace an opportunity to handle any request or at least log them all. Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_64_vio.c | 8 ++++---- arch/powerpc/kvm/book3s_64_vio_hv.c | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 3c17977ffea6..984cec822a98 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -401,7 +401,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm, long ret; if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir))) - return H_HARDWARE; + return H_TOO_HARD; if (dir == DMA_NONE) return H_SUCCESS; @@ -449,15 +449,15 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, return H_TOO_HARD; if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa))) - return H_HARDWARE; + return H_TOO_HARD; if (mm_iommu_mapped_inc(mem)) - return H_CLOSED; + return H_TOO_HARD; ret = iommu_tce_xchg(tbl, entry, &hpa, &dir); if (WARN_ON_ONCE(ret)) { mm_iommu_mapped_dec(mem); - return H_HARDWARE; + return H_TOO_HARD; } if (dir != DMA_NONE) diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index c2848e0b1b71..7388b660e648 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -300,10 +300,10 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift, &hpa))) - return H_HARDWARE; + return H_TOO_HARD; if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem))) - return H_CLOSED; + return H_TOO_HARD; ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir); if (ret) { @@ -501,7 +501,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, rmap = (void *) vmalloc_to_phys(rmap); if (WARN_ON_ONCE_RM(!rmap)) - return H_HARDWARE; + return H_TOO_HARD; /* * Synchronize with the MMU notifier callbacks in From 0f62f0e95be29200ab2ab98ca870e22c9b148dfa Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:52 +0100 Subject: [PATCH 046/204] kvm: arm64: Set a limit on the IPA size So far we have restricted the IPA size of the VM to the default value (40bits). 
Now that we can manage the IPA size per VM and support dynamic stage2 page tables, we can allow VMs to have larger IPA. This patch introduces a the maximum IPA size supported on the host. This is decided by the following factors : 1) Maximum PARange supported by the CPUs - This can be inferred from the system wide safe value. 2) Maximum PA size supported by the host kernel (48 vs 52) 3) Number of levels in the host page table (as we base our stage2 tables on the host table helpers). Since the stage2 page table code is dependent on the stage1 page table, we always ensure that : Number of Levels at Stage1 >= Number of Levels at Stage2 So we limit the IPA to make sure that the above condition is satisfied. This will affect the following combinations of VA_BITS and IPA for different page sizes. Host configuration | Unsupported IPA ranges 39bit VA, 4K | [44, 48] 36bit VA, 16K | [41, 48] 42bit VA, 64K | [47, 52] Supporting the above combinations need independent stage2 page table manipulation code, which would need substantial changes. We could purse the solution independently and switch the page table code once we have it ready. Cc: Catalin Marinas Cc: Marc Zyngier Cc: Christoffer Dall Reviewed-by: Eric Auger Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_mmu.h | 2 ++ arch/arm64/include/asm/kvm_host.h | 12 +++------ arch/arm64/kvm/reset.c | 43 +++++++++++++++++++++++++++++++ virt/kvm/arm/arm.c | 2 ++ 4 files changed, 50 insertions(+), 9 deletions(-) diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 12ae5fbbcf01..5ad1a54f98dc 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -358,6 +358,8 @@ static inline int hyp_map_aux_data(void) #define kvm_phys_to_vttbr(addr) (addr) +static inline void kvm_set_ipa_limit(void) {} + #endif /* !__ASSEMBLY__ */ #endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 5ecd457bce7d..f008f8866b2a 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -442,15 +442,7 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); -static inline void __cpu_init_stage2(void) -{ - u32 ps; - - /* Sanity check for minimum IPA size support */ - ps = id_aa64mmfr0_parange_to_phys_shift(read_sysreg(id_aa64mmfr0_el1) & 0x7); - WARN_ONCE(ps < 40, - "PARange is %d bits, unsupported configuration!", ps); -} +static inline void __cpu_init_stage2(void) {} /* Guest/host FPSIMD coordination helpers */ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); @@ -513,6 +505,8 @@ static inline int kvm_arm_have_ssbd(void) void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu); void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu); +void kvm_set_ipa_limit(void); + #define __KVM_HAVE_ARCH_VM_ALLOC struct kvm *kvm_arch_alloc_vm(void); void kvm_arch_free_vm(struct kvm *kvm); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 2bf41e007390..96b3f50101bc 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -34,6 +34,9 @@ #include #include +/* Maximum phys_shift supported for any VM on this host */ +static u32 kvm_ipa_limit; + /* * ARMv8 Reset Values */ @@ -135,6 +138,46 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) return kvm_timer_vcpu_reset(vcpu); } +void kvm_set_ipa_limit(void) +{ + unsigned int ipa_max, pa_max, va_max, parange; + + parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 0x7; + 
pa_max = id_aa64mmfr0_parange_to_phys_shift(parange); + + /* Clamp the IPA limit to the PA size supported by the kernel */ + ipa_max = (pa_max > PHYS_MASK_SHIFT) ? PHYS_MASK_SHIFT : pa_max; + /* + * Since our stage2 table is dependent on the stage1 page table code, + * we must always honor the following condition: + * + * Number of levels in Stage1 >= Number of levels in Stage2. + * + * So clamp the ipa limit further down to limit the number of levels. + * Since we can concatenate upto 16 tables at entry level, we could + * go upto 4bits above the maximum VA addressible with the current + * number of levels. + */ + va_max = PGDIR_SHIFT + PAGE_SHIFT - 3; + va_max += 4; + + if (va_max < ipa_max) + ipa_max = va_max; + + /* + * If the final limit is lower than the real physical address + * limit of the CPUs, report the reason. + */ + if (ipa_max < pa_max) + pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n", + (va_max < pa_max) ? "Virtual" : "Physical"); + + WARN(ipa_max < KVM_PHYS_SHIFT, + "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max); + kvm_ipa_limit = ipa_max; + kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit); +} + /* * Configure the VTCR_EL2 for this VM. The VTCR value is common * across all the physical CPUs on the system. We use system wide diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 43e716bc3f08..631f9a3ad99a 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -1413,6 +1413,8 @@ static int init_common_resources(void) kvm_vmid_bits = kvm_get_vmid_bits(); kvm_info("%d-bit VMID\n", kvm_vmid_bits); + kvm_set_ipa_limit(); + return 0; } From 58b3efc820acd3219e89ff014e93346a734229b8 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:53 +0100 Subject: [PATCH 047/204] kvm: arm64: Limit the minimum number of page table levels Since we are about to remove the lower limit on the IPA size, make sure that we do not go to 1 level page table (e.g, with 32bit IPA on 64K host with concatenation) to avoid splitting the host PMD huge pages at stage2. Cc: Marc Zyngier Cc: Christoffer Dall Reviewed-by: Eric Auger Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/stage2_pgtable.h | 7 ++++++- arch/arm64/kvm/reset.c | 10 +++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h index c62fe118a898..2cce769ba4c6 100644 --- a/arch/arm64/include/asm/stage2_pgtable.h +++ b/arch/arm64/include/asm/stage2_pgtable.h @@ -72,8 +72,13 @@ /* * The number of PTRS across all concatenated stage2 tables given by the * number of bits resolved at the initial level. + * If we force more levels than necessary, we may have (stage2_pgdir_shift > IPA), + * in which case, stage2_pgd_ptrs will have one entry. */ -#define __s2_pgd_ptrs(ipa, lvls) (1 << ((ipa) - pt_levels_pgdir_shift((lvls)))) +#define pgd_ptrs_shift(ipa, pgdir_shift) \ + ((ipa) > (pgdir_shift) ? 
((ipa) - (pgdir_shift)) : 0) +#define __s2_pgd_ptrs(ipa, lvls) \ + (1 << (pgd_ptrs_shift((ipa), pt_levels_pgdir_shift(lvls)))) #define __s2_pgd_size(ipa, lvls) (__s2_pgd_ptrs((ipa), (lvls)) * sizeof(pgd_t)) #define stage2_pgd_ptrs(kvm) __s2_pgd_ptrs(kvm_phys_shift(kvm), kvm_stage2_levels(kvm)) diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 96b3f50101bc..f156e45760bc 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -190,6 +190,7 @@ int kvm_arm_config_vm(struct kvm *kvm, unsigned long type) { u64 vtcr = VTCR_EL2_FLAGS; u32 parange, phys_shift; + u8 lvls; if (type) return -EINVAL; @@ -203,7 +204,14 @@ int kvm_arm_config_vm(struct kvm *kvm, unsigned long type) if (phys_shift > KVM_PHYS_SHIFT) phys_shift = KVM_PHYS_SHIFT; vtcr |= VTCR_EL2_T0SZ(phys_shift); - vtcr |= VTCR_EL2_LVLS_TO_SL0(stage2_pgtable_levels(phys_shift)); + /* + * Use a minimum 2 level page table to prevent splitting + * host PMD huge pages at stage2. + */ + lvls = stage2_pgtable_levels(phys_shift); + if (lvls < 2) + lvls = 2; + vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls); /* * Enable the Hardware Access Flag management, unconditionally From 233a7cb235318223df8133235383f4c595c654c1 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 26 Sep 2018 17:32:54 +0100 Subject: [PATCH 048/204] kvm: arm64: Allow tuning the physical address size for VM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow specifying the physical address size limit for a new VM via the kvm_type argument for the KVM_CREATE_VM ioctl. This allows us to finalise the stage2 page table as early as possible and hence perform the right checks on the memory slots without complication. The size is encoded as Log2(PA_Size) in bits[7:0] of the type field. For backward compatibility the value 0 is reserved and implies 40bits. Also, lift the limit of the IPA to host limit and allow lower IPA sizes (e.g, 32). The userspace could check the extension KVM_CAP_ARM_VM_IPA_SIZE for the availability of this feature. The cap check returns the maximum limit for the physical address shift supported by the host. Cc: Marc Zyngier Cc: Christoffer Dall Cc: Peter Maydell Cc: Paolo Bonzini Cc: Radim Krčmář Reviewed-by: Eric Auger Signed-off-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- Documentation/virtual/kvm/api.txt | 31 +++++++++++++++++++++++++ arch/arm64/include/asm/stage2_pgtable.h | 20 ---------------- arch/arm64/kvm/reset.c | 17 ++++++++++---- include/uapi/linux/kvm.h | 10 ++++++++ 4 files changed, 54 insertions(+), 24 deletions(-) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 647f94128a85..f6b0af55d010 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -123,6 +123,37 @@ memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the flag KVM_VM_MIPS_VZ. +On arm64, the physical address size for a VM (IPA Size limit) is limited +to 40bits by default. The limit can be configured if the host supports the +extension KVM_CAP_ARM_VM_IPA_SIZE. When supported, use +KVM_VM_TYPE_ARM_IPA_SIZE(IPA_Bits) to set the size in the machine type +identifier, where IPA_Bits is the maximum width of any physical +address used by the VM. The IPA_Bits is encoded in bits[7-0] of the +machine type identifier. 
+ +e.g, to configure a guest to use 48bit physical address size : + + vm_fd = ioctl(dev_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(48)); + +The requested size (IPA_Bits) must be : + 0 - Implies default size, 40bits (for backward compatibility) + + or + + N - Implies N bits, where N is a positive integer such that, + 32 <= N <= Host_IPA_Limit + +Host_IPA_Limit is the maximum possible value for IPA_Bits on the host and +is dependent on the CPU capability and the kernel configuration. The limit can +be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION +ioctl() at run-time. + +Please note that configuring the IPA size does not affect the capability +exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects +size of the address translated by the stage2 level (guest physical to +host physical address translations). + + 4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h index 2cce769ba4c6..d352f6df8d2c 100644 --- a/arch/arm64/include/asm/stage2_pgtable.h +++ b/arch/arm64/include/asm/stage2_pgtable.h @@ -42,28 +42,8 @@ * the range (IPA_SHIFT, IPA_SHIFT - 4). */ #define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4) -#define STAGE2_PGTABLE_LEVELS stage2_pgtable_levels(KVM_PHYS_SHIFT) #define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr) -/* - * With all the supported VA_BITs and 40bit guest IPA, the following condition - * is always true: - * - * STAGE2_PGTABLE_LEVELS <= CONFIG_PGTABLE_LEVELS - * - * We base our stage-2 page table walker helpers on this assumption and - * fall back to using the host version of the helper wherever possible. - * i.e, if a particular level is not folded (e.g, PUD) at stage2, we fall back - * to using the host version, since it is guaranteed it is not folded at host. - * - * If the condition breaks in the future, we can rearrange the host level - * definitions and reuse them for stage2. Till then... - */ -#if STAGE2_PGTABLE_LEVELS > CONFIG_PGTABLE_LEVELS -#error "Unsupported combination of guest IPA and host VA_BITS." 
-#endif - - /* stage2_pgdir_shift() is the size mapped by top-level stage2 entry for the VM */ #define stage2_pgdir_shift(kvm) pt_levels_pgdir_shift(kvm_stage2_levels(kvm)) #define stage2_pgdir_size(kvm) (1ULL << stage2_pgdir_shift(kvm)) diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index f156e45760bc..95f28d5950e0 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -89,6 +89,9 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_VCPU_EVENTS: r = 1; break; + case KVM_CAP_ARM_VM_IPA_SIZE: + r = kvm_ipa_limit; + break; default: r = 0; } @@ -192,17 +195,23 @@ int kvm_arm_config_vm(struct kvm *kvm, unsigned long type) u32 parange, phys_shift; u8 lvls; - if (type) + if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) return -EINVAL; + phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type); + if (phys_shift) { + if (phys_shift > kvm_ipa_limit || + phys_shift < 32) + return -EINVAL; + } else { + phys_shift = KVM_PHYS_SHIFT; + } + parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7; if (parange > ID_AA64MMFR0_PARANGE_MAX) parange = ID_AA64MMFR0_PARANGE_MAX; vtcr |= parange << VTCR_EL2_PS_SHIFT; - phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange); - if (phys_shift > KVM_PHYS_SHIFT) - phys_shift = KVM_PHYS_SHIFT; vtcr |= VTCR_EL2_T0SZ(phys_shift); /* * Use a minimum 2 level page table to prevent splitting diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 251be353f950..95aa73ca65dc 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -750,6 +750,15 @@ struct kvm_ppc_resize_hpt { #define KVM_S390_SIE_PAGE_OFFSET 1 +/* + * On arm64, machine type can be used to request the physical + * address size for the VM. Bits[7-0] are reserved for the guest + * PA size shift (i.e, log2(PA_Size)). For backward compatibility, + * value 0 implies the default IPA size, 40bits. + */ +#define KVM_VM_TYPE_ARM_IPA_SIZE_MASK 0xffULL +#define KVM_VM_TYPE_ARM_IPA_SIZE(x) \ + ((x) & KVM_VM_TYPE_ARM_IPA_SIZE_MASK) /* * ioctls for /dev/kvm fds: */ @@ -953,6 +962,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_NESTED_STATE 157 #define KVM_CAP_ARM_INJECT_SERROR_ESR 158 #define KVM_CAP_MSR_PLATFORM_INFO 159 +#define KVM_CAP_ARM_VM_IPA_SIZE 160 /* returns maximum IPA bits for a VM */ #ifdef KVM_CAP_IRQ_ROUTING From bca607ebc76af9540e4aad5b2241a7323354be43 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 1 Oct 2018 13:40:36 +0100 Subject: [PATCH 049/204] KVM: arm/arm64: Rename kvm_arm_config_vm to kvm_arm_setup_stage2 VM tends to be a very overloaded term in KVM, so let's keep it to describe the virtual machine. For the virtual memory setup, let's use the "stage2" suffix. 
Reviewed-by: Eric Auger Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 6 +++++- arch/arm64/include/asm/kvm_arm.h | 2 +- arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/kvm/reset.c | 2 +- virt/kvm/arm/arm.c | 2 +- 5 files changed, 9 insertions(+), 5 deletions(-) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 72d46418e1ef..b45af481ccf7 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -354,8 +354,12 @@ static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {} struct kvm *kvm_arch_alloc_vm(void); void kvm_arch_free_vm(struct kvm *kvm); -static inline int kvm_arm_config_vm(struct kvm *kvm, unsigned long type) +static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) { + /* + * On 32bit ARM, VMs get a static 40bit IPA stage2 setup, + * so any non-zero value used as type is illegal. + */ if (type) return -EINVAL; return 0; diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index f1330284720d..6e324d1f1231 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -133,7 +133,7 @@ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are * not known to exist and will break with this configuration. * - * The VTCR_EL2 is configured per VM and is initialised in kvm_arm_config_vm(). + * The VTCR_EL2 is configured per VM and is initialised in kvm_arm_setup_stage2(). * * Note that when using 4K pages, we concatenate two first level page tables * together. With 16K pages, we concatenate 16 first level page tables. diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f008f8866b2a..376a5b695467 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -511,6 +511,6 @@ void kvm_set_ipa_limit(void); struct kvm *kvm_arch_alloc_vm(void); void kvm_arch_free_vm(struct kvm *kvm); -int kvm_arm_config_vm(struct kvm *kvm, unsigned long type); +int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type); #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 95f28d5950e0..aa806d582552 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -189,7 +189,7 @@ void kvm_set_ipa_limit(void) * all CPUs, as it is safe to run with or without the feature and * the bit is RES0 on CPUs that don't support it. */ -int kvm_arm_config_vm(struct kvm *kvm, unsigned long type) +int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) { u64 vtcr = VTCR_EL2_FLAGS; u32 parange, phys_shift; diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 631f9a3ad99a..91c464c9cd21 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -120,7 +120,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret, cpu; - ret = kvm_arm_config_vm(kvm, type); + ret = kvm_arm_setup_stage2(kvm, type); if (ret) return ret; From 9d47bb0d9ea8528373b4c6f9bca6c7f402900297 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 1 Oct 2018 13:41:32 +0100 Subject: [PATCH 050/204] KVM: arm64: Drop __cpu_init_stage2 on the VHE path __cpu_init_stage2 doesn't do anything anymore on arm64, and is totally non-sensical if running VHE (as VHE is 64bit only). 
Reviewed-by: Eric Auger Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- virt/kvm/arm/arm.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 91c464c9cd21..4ce99bb223bc 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -1310,16 +1310,10 @@ static void cpu_hyp_reinit(void) { cpu_hyp_reset(); - if (is_kernel_in_hyp_mode()) { - /* - * __cpu_init_stage2() is safe to call even if the PM - * event was cancelled before the CPU was reset. - */ - __cpu_init_stage2(); + if (is_kernel_in_hyp_mode()) kvm_timer_init_vhe(); - } else { + else cpu_init_hyp_mode(NULL); - } if (vgic_present) kvm_vgic_init_cpu_hardware(); From f0725345e3e127032376e4fcb6b0fc893237fcef Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Thu, 9 Aug 2018 22:20:41 +0800 Subject: [PATCH 051/204] arm64: KVM: Remove some extra semicolon in kvm_target_cpu There are some extra semicolon in kvm_target_cpu, remove it. Signed-off-by: zhong jiang Signed-off-by: Marc Zyngier --- arch/arm64/kvm/guest.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 07256b08226c..a74f84d09412 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -338,15 +338,15 @@ int __attribute_const__ kvm_target_cpu(void) return KVM_ARM_TARGET_CORTEX_A53; case ARM_CPU_PART_CORTEX_A57: return KVM_ARM_TARGET_CORTEX_A57; - }; + } break; case ARM_CPU_IMP_APM: switch (part_number) { case APM_CPU_PART_POTENZA: return KVM_ARM_TARGET_XGENE_POTENZA; - }; + } break; - }; + } /* Return a default generic target */ return KVM_ARM_TARGET_GENERIC_V8; From fd2ef358282c849c193aa36dadbf4f07f7dcd29b Mon Sep 17 00:00:00 2001 From: Punit Agrawal Date: Mon, 1 Oct 2018 16:54:35 +0100 Subject: [PATCH 052/204] KVM: arm/arm64: Ensure only THP is candidate for adjustment PageTransCompoundMap() returns true for hugetlbfs and THP hugepages. This behaviour incorrectly leads to stage 2 faults for unsupported hugepage sizes (e.g., 64K hugepage with 4K pages) to be treated as THP faults. Tighten the check to filter out hugetlbfs pages. This also leads to consistently mapping all unsupported hugepage sizes as PTE level entries at stage 2. Signed-off-by: Punit Agrawal Reviewed-by: Suzuki Poulose Cc: Christoffer Dall Cc: Marc Zyngier Cc: stable@vger.kernel.org # v4.13+ Signed-off-by: Marc Zyngier --- virt/kvm/arm/mmu.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 7e477b3cae5b..c23a1b323aad 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1231,8 +1231,14 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap) { kvm_pfn_t pfn = *pfnp; gfn_t gfn = *ipap >> PAGE_SHIFT; + struct page *page = pfn_to_page(pfn); - if (PageTransCompoundMap(pfn_to_page(pfn))) { + /* + * PageTransCompoungMap() returns true for THP and + * hugetlbfs. Make sure the adjustment is done only for THP + * pages. 
+ */ + if (!PageHuge(page) && PageTransCompoundMap(page)) { unsigned long mask; /* * The address we faulted on is backed by a transparent huge From 1006284c5e411872333967b1970c2ca46a9e225f Mon Sep 17 00:00:00 2001 From: Cameron Kaiser Date: Tue, 31 Jul 2018 07:39:21 -0700 Subject: [PATCH 053/204] KVM: PPC: Book3S PR: Exiting split hack mode needs to fixup both PC and LR When an OS (currently only classic Mac OS) is running in KVM-PR and makes a linked jump from code with split hack addressing enabled into code that does not, LR is not correctly updated and reflects the previously munged PC. To fix this, this patch undoes the address munge when exiting split hack mode so that code relying on LR being a proper address will now execute. This does not affect OS X or other operating systems running on KVM-PR. Signed-off-by: Cameron Kaiser Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 87348e498c89..9600d740ebbf 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -78,8 +78,11 @@ void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) { if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { ulong pc = kvmppc_get_pc(vcpu); + ulong lr = kvmppc_get_lr(vcpu); if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); + if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) + kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK); vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; } } From aa2278644ae54ff762ce33f9c9563d759e9cca9f Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 12 Sep 2018 10:42:12 +1000 Subject: [PATCH 054/204] KVM: PPC: Book3S HV: Provide mode where all vCPUs on a core must be the same VM This adds a mode where the vcore scheduling logic in HV KVM limits itself to scheduling only virtual cores from the same VM on any given physical core. This is enabled via a new module parameter on the kvm-hv module called "one_vm_per_core". For this to work on POWER9, it is necessary to set indep_threads_mode=N. (On POWER8, hardware limitations mean that KVM is never in independent threads mode, regardless of the indep_threads_mode setting.) Thus the settings needed for this to work are: 1. The host is in SMT1 mode. 2. On POWER8, the host is not in 2-way or 4-way static split-core mode. 3. On POWER9, the indep_threads_mode parameter is N. 4. The one_vm_per_core parameter is Y. With these settings, KVM can run up to 4 vcpus on a core at the same time on POWER9, or up to 8 vcpus on POWER8 (depending on the guest threading mode), and will ensure that all of the vcpus belong to the same VM. This is intended for use in security-conscious settings where users are concerned about possible side-channel attacks between threads which could perhaps enable one VM to attack another VM on the same core, or the host. 
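As a hedged example of how settings 1, 3 and 4 might be applied on a POWER9 host (ppc64_cpu from powerpc-utils and the usual sysfs module-parameter paths are assumed; exact commands can differ by distribution):

	# 1. put the host into SMT1 mode
	ppc64_cpu --smt=1

	# 3. and 4. load kvm-hv with the required parameters ...
	modprobe kvm_hv indep_threads_mode=N one_vm_per_core=Y

	# ... or toggle them at runtime, since both parameters are declared writable
	echo N > /sys/module/kvm_hv/parameters/indep_threads_mode
	echo Y > /sys/module/kvm_hv/parameters/one_vm_per_core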
Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 3e3a71594e63..4fd04b3a058b 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -104,6 +104,10 @@ static bool indep_threads_mode = true; module_param(indep_threads_mode, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)"); +static bool one_vm_per_core; +module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires indep_threads_mode=N)"); + #ifdef CONFIG_KVM_XICS static struct kernel_param_ops module_param_ops = { .set = param_set_int, @@ -2493,6 +2497,10 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) if (!cpu_has_feature(CPU_FTR_ARCH_207S)) return false; + /* In one_vm_per_core mode, require all vcores to be from the same vm */ + if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm) + return false; + /* Some POWER9 chips require all threads to be in the same MMU mode */ if (no_mixing_hpt_and_radix && kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm)) From 8e41bd54317b04f2bf03012a4ca8ab7360c9beef Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Thu, 4 Oct 2018 14:42:43 +0200 Subject: [PATCH 055/204] KVM: s390: fix locking for crypto setting error path We need to unlock the kvm->lock mutex in the error case. Reported-by: smatch Fixes: 37940fb0b6a2c4bf101 ("KVM: s390: device attrs to enable/disable AP interpretation") Signed-off-by: Christian Borntraeger Reviewed-by: Janosch Frank Reviewed-by: Pierre Morel Reviewed-by: David Hildenbrand Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index a6230b00c1df..734d87d88eb3 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -859,8 +859,10 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) mutex_lock(&kvm->lock); switch (attr->attr) { case KVM_S390_VM_CRYPTO_ENABLE_AES_KW: - if (!test_kvm_facility(kvm, 76)) + if (!test_kvm_facility(kvm, 76)) { + mutex_unlock(&kvm->lock); return -EINVAL; + } get_random_bytes( kvm->arch.crypto.crycb->aes_wrapping_key_mask, sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); @@ -868,8 +870,10 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support"); break; case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: - if (!test_kvm_facility(kvm, 76)) + if (!test_kvm_facility(kvm, 76)) { + mutex_unlock(&kvm->lock); return -EINVAL; + } get_random_bytes( kvm->arch.crypto.crycb->dea_wrapping_key_mask, sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); @@ -877,16 +881,20 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support"); break; case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: - if (!test_kvm_facility(kvm, 76)) + if (!test_kvm_facility(kvm, 76)) { + mutex_unlock(&kvm->lock); return -EINVAL; + } kvm->arch.crypto.aes_kw = 0; memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support"); break; case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: - if 
(!test_kvm_facility(kvm, 76)) + if (!test_kvm_facility(kvm, 76)) { + mutex_unlock(&kvm->lock); return -EINVAL; + } kvm->arch.crypto.dea_kw = 0; memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); From 0e237e44699465139c07f969b051f83066a2ec1d Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Fri, 5 Oct 2018 10:31:09 +0200 Subject: [PATCH 056/204] KVM: s390: Tracing APCB changes kvm_arch_crypto_set_masks is a new function to centralize the setup the APCB masks inside the CRYCB SIE satellite. To trace APCB mask changes, we add KVM_EVENT() tracing to both kvm_arch_crypto_set_masks and kvm_arch_crypto_clear_masks. Signed-off-by: Pierre Morel Message-Id: <1538728270-10340-2-git-send-email-pmorel@linux.ibm.com> Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 2 ++ arch/s390/kvm/kvm-s390.c | 41 ++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 36d35313e840..22aa4da91f7a 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -861,6 +861,8 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work); void kvm_arch_crypto_clear_masks(struct kvm *kvm); +void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, + unsigned long *aqm, unsigned long *adm); extern int sie64a(struct kvm_s390_sie_block *, u64 *); extern char sie_exit; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 734d87d88eb3..22a320a9a00d 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2064,6 +2064,46 @@ static void kvm_s390_set_crycb_format(struct kvm *kvm) kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; } +void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, + unsigned long *aqm, unsigned long *adm) +{ + struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; + + mutex_lock(&kvm->lock); + kvm_s390_vcpu_block_all(kvm); + + switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { + case CRYCB_FORMAT2: /* APCB1 use 256 bits */ + memcpy(crycb->apcb1.apm, apm, 32); + VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx", + apm[0], apm[1], apm[2], apm[3]); + memcpy(crycb->apcb1.aqm, aqm, 32); + VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx", + aqm[0], aqm[1], aqm[2], aqm[3]); + memcpy(crycb->apcb1.adm, adm, 32); + VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx", + adm[0], adm[1], adm[2], adm[3]); + break; + case CRYCB_FORMAT1: + case CRYCB_FORMAT0: /* Fall through both use APCB0 */ + memcpy(crycb->apcb0.apm, apm, 8); + memcpy(crycb->apcb0.aqm, aqm, 2); + memcpy(crycb->apcb0.adm, adm, 2); + VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x", + apm[0], *((unsigned short *)aqm), + *((unsigned short *)adm)); + break; + default: /* Can not happen */ + break; + } + + /* recreate the shadow crycb for each vcpu */ + kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); + kvm_s390_vcpu_unblock_all(kvm); + mutex_unlock(&kvm->lock); +} +EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks); + void kvm_arch_crypto_clear_masks(struct kvm *kvm) { mutex_lock(&kvm->lock); @@ -2074,6 +2114,7 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm) memset(&kvm->arch.crypto.crycb->apcb1, 0, sizeof(kvm->arch.crypto.crycb->apcb1)); + VM_EVENT(kvm, 3, "%s", "CLR CRYCB:"); /* recreate the shadow crycb for each vcpu */ kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); 
kvm_s390_vcpu_unblock_all(kvm); From 76c7829f5b8c7691b18929cdedd6d2e79db3c2b9 Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Fri, 5 Oct 2018 10:31:10 +0200 Subject: [PATCH 057/204] s390: vfio-ap: setup APCB mask using KVM dedicated function We replace the vfio_ap_mdev_copy_masks() by the new kvm_arch_crypto_set_masks() to be able to use the standard KVM tracing system. Signed-off-by: Pierre Morel Message-Id: <1538728270-10340-3-git-send-email-pmorel@linux.ibm.com> Reviewed-by: Cornelia Huck Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- drivers/s390/crypto/vfio_ap_ops.c | 35 +++---------------------------- 1 file changed, 3 insertions(+), 32 deletions(-) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index d3d9eb72b0f1..ea99165d1045 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -727,37 +727,6 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = { NULL }; -static void vfio_ap_mdev_copy_masks(struct ap_matrix_mdev *matrix_mdev) -{ - int nbytes; - unsigned long *apm, *aqm, *adm; - struct kvm_s390_crypto_cb *crycb = matrix_mdev->kvm->arch.crypto.crycb; - - switch (matrix_mdev->kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { - case CRYCB_FORMAT2: - apm = (unsigned long *)crycb->apcb1.apm; - aqm = (unsigned long *)crycb->apcb1.aqm; - adm = (unsigned long *)crycb->apcb1.adm; - break; - case CRYCB_FORMAT1: - case CRYCB_FORMAT0: - apm = (unsigned long *)crycb->apcb0.apm; - aqm = (unsigned long *)crycb->apcb0.aqm; - adm = (unsigned long *)crycb->apcb0.adm; - break; - default: - /* cannot happen */ - return; - } - - nbytes = DIV_ROUND_UP(matrix_mdev->matrix.apm_max + 1, BITS_PER_BYTE); - memcpy(apm, matrix_mdev->matrix.apm, nbytes); - nbytes = DIV_ROUND_UP(matrix_mdev->matrix.aqm_max + 1, BITS_PER_BYTE); - memcpy(aqm, matrix_mdev->matrix.aqm, nbytes); - nbytes = DIV_ROUND_UP(matrix_mdev->matrix.adm_max + 1, BITS_PER_BYTE); - memcpy(adm, matrix_mdev->matrix.adm, nbytes); -} - /** * vfio_ap_mdev_set_kvm * @@ -814,7 +783,9 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb, if (!matrix_mdev->kvm->arch.crypto.crycbd) return NOTIFY_DONE; - vfio_ap_mdev_copy_masks(matrix_mdev); + kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm, + matrix_mdev->matrix.aqm, + matrix_mdev->matrix.adm); return NOTIFY_OK; } From 46623ab3194a3f37ea12da2964bca3b35fdd20ed Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Fri, 5 Oct 2018 19:22:38 +0200 Subject: [PATCH 058/204] s390: vfio-ap: make local functions and data static no functional change, just hygiene. 
Signed-off-by: Christian Borntraeger Reviewed-by: Cornelia Huck Reviewed-by: David Hildenbrand --- drivers/s390/crypto/vfio_ap_drv.c | 4 ++-- drivers/s390/crypto/vfio_ap_ops.c | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index 8b51821d9bf7..7667b38728f0 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -112,7 +112,7 @@ static void vfio_ap_matrix_dev_destroy(void) root_device_unregister(matrix_dev->device.parent); } -int __init vfio_ap_init(void) +static int __init vfio_ap_init(void) { int ret; @@ -146,7 +146,7 @@ int __init vfio_ap_init(void) return 0; } -void __exit vfio_ap_exit(void) +static void __exit vfio_ap_exit(void) { vfio_ap_mdev_unregister(); ap_driver_unregister(&vfio_ap_drv); diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index ea99165d1045..272ef427dcc0 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -77,7 +77,7 @@ static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf) return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT); } -MDEV_TYPE_ATTR_RO(name); +static MDEV_TYPE_ATTR_RO(name); static ssize_t available_instances_show(struct kobject *kobj, struct device *dev, char *buf) @@ -86,7 +86,7 @@ static ssize_t available_instances_show(struct kobject *kobj, atomic_read(&matrix_dev->available_instances)); } -MDEV_TYPE_ATTR_RO(available_instances); +static MDEV_TYPE_ATTR_RO(available_instances); static ssize_t device_api_show(struct kobject *kobj, struct device *dev, char *buf) @@ -94,7 +94,7 @@ static ssize_t device_api_show(struct kobject *kobj, struct device *dev, return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING); } -MDEV_TYPE_ATTR_RO(device_api); +static MDEV_TYPE_ATTR_RO(device_api); static struct attribute *vfio_ap_mdev_type_attrs[] = { &mdev_type_attr_name.attr, @@ -395,7 +395,7 @@ static ssize_t unassign_adapter_store(struct device *dev, return count; } -DEVICE_ATTR_WO(unassign_adapter); +static DEVICE_ATTR_WO(unassign_adapter); static int vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev, @@ -491,7 +491,7 @@ static ssize_t assign_domain_store(struct device *dev, return ret; } -DEVICE_ATTR_WO(assign_domain); +static DEVICE_ATTR_WO(assign_domain); /** @@ -537,7 +537,7 @@ static ssize_t unassign_domain_store(struct device *dev, return count; } -DEVICE_ATTR_WO(unassign_domain); +static DEVICE_ATTR_WO(unassign_domain); /** * assign_control_domain_store @@ -586,7 +586,7 @@ static ssize_t assign_control_domain_store(struct device *dev, return count; } -DEVICE_ATTR_WO(assign_control_domain); +static DEVICE_ATTR_WO(assign_control_domain); /** * unassign_control_domain_store @@ -630,7 +630,7 @@ static ssize_t unassign_control_domain_store(struct device *dev, return count; } -DEVICE_ATTR_WO(unassign_control_domain); +static DEVICE_ATTR_WO(unassign_control_domain); static ssize_t control_domains_show(struct device *dev, struct device_attribute *dev_attr, @@ -654,7 +654,7 @@ static ssize_t control_domains_show(struct device *dev, return nchars; } -DEVICE_ATTR_RO(control_domains); +static DEVICE_ATTR_RO(control_domains); static ssize_t matrix_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -704,7 +704,7 @@ static ssize_t matrix_show(struct device *dev, struct device_attribute *attr, return nchars; } -DEVICE_ATTR_RO(matrix); +static DEVICE_ATTR_RO(matrix); static struct attribute 
*vfio_ap_mdev_attrs[] = { &dev_attr_assign_adapter.attr, From 42de7b9e216728edbe53e0c4513e06fe3d566c5d Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 10 Sep 2018 18:29:10 +1000 Subject: [PATCH 059/204] KVM: PPC: Validate TCEs against preregistered memory page sizes The userspace can request an arbitrary supported page size for a DMA window and this works fine as long as the mapped memory is backed with the pages of the same or bigger size; if this is not the case, mm_iommu_ua_to_hpa{_rm}() fail and tables do not populated with dangerously incorrect TCEs. However since it is quite easy to misconfigure the KVM and we do not do reverts to all changes made to TCE tables if an error happens in a middle, we better do the acceptable page size validation before we even touch the tables. This enhances kvmppc_tce_validate() to check the hardware IOMMU page sizes against the preregistered memory page sizes. Since the new check uses real/virtual mode helpers, this renames kvmppc_tce_validate() to kvmppc_rm_tce_validate() to handle the real mode case and mirrors it for the virtual mode under the old name. The real mode handler is not used for the virtual mode as: 1. it uses _lockless() list traversing primitives instead of RCU; 2. realmode's mm_iommu_ua_to_hpa_rm() uses vmalloc_to_phys() which virtual mode does not have to use and since on POWER9+radix only virtual mode handlers actually work, we do not want to slow down that path even a bit. This removes EXPORT_SYMBOL_GPL(kvmppc_tce_validate) as the validators are static now. From now on the attempts on mapping IOMMU pages bigger than allowed will result in KVM exit. Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson [mpe: Fix KVM_HV=n build] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_ppc.h | 2 -- arch/powerpc/kvm/book3s_64_vio.c | 35 +++++++++++++++++++++++++++++ arch/powerpc/kvm/book3s_64_vio_hv.c | 32 ++++++++++++++++++++------ 3 files changed, 60 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index e991821dd7fa..2f5d431e438b 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -194,8 +194,6 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table( (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \ (stt)->size, (ioba), (npages)) ? 
\ H_PARAMETER : H_SUCCESS) -extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt, - unsigned long tce); extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, unsigned long *ua, unsigned long **prmap); extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt, diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 984cec822a98..01e1994daff0 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -363,6 +363,41 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, return ret; } +static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, + unsigned long tce) +{ + unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE); + enum dma_data_direction dir = iommu_tce_direction(tce); + struct kvmppc_spapr_tce_iommu_table *stit; + unsigned long ua = 0; + + /* Allow userspace to poison TCE table */ + if (dir == DMA_NONE) + return H_SUCCESS; + + if (iommu_tce_check_gpa(stt->page_shift, gpa)) + return H_TOO_HARD; + + if (kvmppc_gpa_to_ua(stt->kvm, tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), + &ua, NULL)) + return H_TOO_HARD; + + list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { + unsigned long hpa = 0; + struct mm_iommu_table_group_mem_t *mem; + long shift = stit->tbl->it_page_shift; + + mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift); + if (!mem) + return H_TOO_HARD; + + if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) + return H_TOO_HARD; + } + + return H_SUCCESS; +} + static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry) { unsigned long hpa = 0; diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 7388b660e648..3c05fc22de07 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -87,6 +87,7 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm, } EXPORT_SYMBOL_GPL(kvmppc_find_table); +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE /* * Validates TCE address. * At the moment flags and page mask are validated. @@ -94,14 +95,14 @@ EXPORT_SYMBOL_GPL(kvmppc_find_table); * to the table and user space is supposed to process them), we can skip * checking other things (such as TCE is a guest RAM address or the page * was actually allocated). 
- * - * WARNING: This will be called in real-mode on HV KVM and virtual - * mode on PR KVM */ -long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce) +static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt, + unsigned long tce) { unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE); enum dma_data_direction dir = iommu_tce_direction(tce); + struct kvmppc_spapr_tce_iommu_table *stit; + unsigned long ua = 0; /* Allow userspace to poison TCE table */ if (dir == DMA_NONE) @@ -110,9 +111,26 @@ long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce) if (iommu_tce_check_gpa(stt->page_shift, gpa)) return H_PARAMETER; + if (kvmppc_gpa_to_ua(stt->kvm, tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), + &ua, NULL)) + return H_TOO_HARD; + + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { + unsigned long hpa = 0; + struct mm_iommu_table_group_mem_t *mem; + long shift = stit->tbl->it_page_shift; + + mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift); + if (!mem) + return H_TOO_HARD; + + if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa)) + return H_TOO_HARD; + } + return H_SUCCESS; } -EXPORT_SYMBOL_GPL(kvmppc_tce_validate); +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ /* Note on the use of page_address() in real mode, * @@ -368,7 +386,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, if (ret != H_SUCCESS) return ret; - ret = kvmppc_tce_validate(stt, tce); + ret = kvmppc_rm_tce_validate(stt, tce); if (ret != H_SUCCESS) return ret; @@ -521,7 +539,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, for (i = 0; i < npages; ++i) { unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); - ret = kvmppc_tce_validate(stt, tce); + ret = kvmppc_rm_tce_validate(stt, tce); if (ret != H_SUCCESS) goto unlock_exit; } From 2691f0ff3d96b471a69c7ef29f3340b91056845d Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 10 Sep 2018 18:29:11 +1000 Subject: [PATCH 060/204] KVM: PPC: Propagate errors to the guest when failed instead of ignoring At the moment if the PUT_TCE{_INDIRECT} handlers fail to update the hardware tables, we print a warning once, clear the entry and continue. This is so as at the time the assumption was that if a VFIO device is hotplugged into the guest, and the userspace replays virtual DMA mappings (i.e. TCEs) to the hardware tables and if this fails, then there is nothing useful we can do about it. However the assumption is not valid as these handlers are not called for TCE replay (VFIO ioctl interface is used for that) and these handlers are for new TCEs. This returns an error to the guest if there is a request which cannot be processed. By now the only possible failure must be H_TOO_HARD. 
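The resulting fall-through can be summarised with a small standalone sketch (the function names below are invented for illustration and are not the real KVM entry points; the exact H_TOO_HARD value is irrelevant here):

#include <stdio.h>

#define H_SUCCESS	0
#define H_TOO_HARD	9999	/* internal "let the next stage try" marker */

static long realmode_put_tce(void)  { return H_TOO_HARD; }	/* e.g. cannot touch the page in real mode */
static long virtmode_put_tce(void)  { return H_TOO_HARD; }	/* e.g. memory not preregistered */
static long userspace_put_tce(void) { return H_SUCCESS; }	/* QEMU can emulate or at least log it */

int main(void)
{
	long ret = realmode_put_tce();

	if (ret == H_TOO_HARD)
		ret = virtmode_put_tce();
	if (ret == H_TOO_HARD)
		ret = userspace_put_tce();

	/* Whatever survives all three stages is what the guest finally sees. */
	printf("hcall result reported to the guest: %ld\n", ret);
	return 0;
}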
Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_64_vio.c | 20 ++++++-------------- arch/powerpc/kvm/book3s_64_vio_hv.c | 21 +++++++-------------- 2 files changed, 13 insertions(+), 28 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 01e1994daff0..8231b178eac6 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -568,14 +568,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry, ua, dir); - if (ret == H_SUCCESS) - continue; - - if (ret == H_TOO_HARD) + if (ret != H_SUCCESS) { + kvmppc_clear_tce(stit->tbl, entry); goto unlock_exit; - - WARN_ON_ONCE(1); - kvmppc_clear_tce(stit->tbl, entry); + } } kvmppc_tce_put(stt, entry, tce); @@ -663,14 +659,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, stit->tbl, entry + i, ua, iommu_tce_direction(tce)); - if (ret == H_SUCCESS) - continue; - - if (ret == H_TOO_HARD) + if (ret != H_SUCCESS) { + kvmppc_clear_tce(stit->tbl, entry); goto unlock_exit; - - WARN_ON_ONCE(1); - kvmppc_clear_tce(stit->tbl, entry); + } } kvmppc_tce_put(stt, entry + i, tce); diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 3c05fc22de07..ae147b5c7571 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -405,14 +405,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry, ua, dir); - if (ret == H_SUCCESS) - continue; - - if (ret == H_TOO_HARD) + if (ret != H_SUCCESS) { + kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); return ret; - - WARN_ON_ONCE_RM(1); - kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); + } } kvmppc_tce_put(stt, entry, tce); @@ -558,14 +554,11 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, stit->tbl, entry + i, ua, iommu_tce_direction(tce)); - if (ret == H_SUCCESS) - continue; - - if (ret == H_TOO_HARD) + if (ret != H_SUCCESS) { + kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, + entry); goto unlock_exit; - - WARN_ON_ONCE_RM(1); - kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); + } } kvmppc_tce_put(stt, entry + i, tce); From a3ac077b75c5a922dcbafd7e689ee09beefae0f6 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 10 Sep 2018 18:29:12 +1000 Subject: [PATCH 061/204] KVM: PPC: Remove redundand permission bits removal The kvmppc_gpa_to_ua() helper itself takes care of the permission bits in the TCE and yet every single caller removes them. This changes semantics of kvmppc_gpa_to_ua() so it takes TCEs (which are GPAs + TCE permission bits) to make the callers simpler. This should cause no behavioural change. Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_ppc.h | 2 +- arch/powerpc/kvm/book3s_64_vio.c | 12 ++++-------- arch/powerpc/kvm/book3s_64_vio_hv.c | 22 +++++++++------------- 3 files changed, 14 insertions(+), 22 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 2f5d431e438b..38d03282bb6a 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -194,7 +194,7 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table( (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \ (stt)->size, (ioba), (npages)) ? 
\ H_PARAMETER : H_SUCCESS) -extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, +extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce, unsigned long *ua, unsigned long **prmap); extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt, unsigned long idx, unsigned long tce); diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 8231b178eac6..c0c64d11cc71 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -378,8 +378,7 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, if (iommu_tce_check_gpa(stt->page_shift, gpa)) return H_TOO_HARD; - if (kvmppc_gpa_to_ua(stt->kvm, tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), - &ua, NULL)) + if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL)) return H_TOO_HARD; list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { @@ -552,8 +551,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, idx = srcu_read_lock(&vcpu->kvm->srcu); - if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm, - tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) { + if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) { ret = H_PARAMETER; goto unlock_exit; } @@ -614,7 +612,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, return ret; idx = srcu_read_lock(&vcpu->kvm->srcu); - if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) { + if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) { ret = H_TOO_HARD; goto unlock_exit; } @@ -649,9 +647,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, } tce = be64_to_cpu(tce); - if (kvmppc_gpa_to_ua(vcpu->kvm, - tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), - &ua, NULL)) + if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) return H_PARAMETER; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index ae147b5c7571..ec99363fdf54 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -111,8 +111,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt, if (iommu_tce_check_gpa(stt->page_shift, gpa)) return H_PARAMETER; - if (kvmppc_gpa_to_ua(stt->kvm, tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), - &ua, NULL)) + if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL)) return H_TOO_HARD; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { @@ -182,10 +181,10 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, } EXPORT_SYMBOL_GPL(kvmppc_tce_put); -long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, +long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce, unsigned long *ua, unsigned long **prmap) { - unsigned long gfn = gpa >> PAGE_SHIFT; + unsigned long gfn = tce >> PAGE_SHIFT; struct kvm_memory_slot *memslot; memslot = search_memslots(kvm_memslots(kvm), gfn); @@ -193,7 +192,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, return -EINVAL; *ua = __gfn_to_hva_memslot(memslot, gfn) | - (gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); + (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE if (prmap) @@ -202,7 +201,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, return 0; } -EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua); +EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl, @@ -391,8 +390,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, return ret; dir = 
iommu_tce_direction(tce); - if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm, - tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) + if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) return H_PARAMETER; entry = ioba >> stt->page_shift; @@ -494,7 +492,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, */ struct mm_iommu_table_group_mem_t *mem; - if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) + if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) return H_TOO_HARD; mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); @@ -510,7 +508,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, * We do not require memory to be preregistered in this case * so lock rmap and do __find_linux_pte_or_hugepte(). */ - if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap)) + if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap)) return H_TOO_HARD; rmap = (void *) vmalloc_to_phys(rmap); @@ -544,9 +542,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); ua = 0; - if (kvmppc_gpa_to_ua(vcpu->kvm, - tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), - &ua, NULL)) + if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) return H_PARAMETER; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { From e7b17d50471a8bfddf7c976c7d3a68104272b2f3 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:30:47 +1100 Subject: [PATCH 062/204] powerpc: Turn off CPU_FTR_P9_TM_HV_ASSIST in non-hypervisor mode When doing nested virtualization, it is only necessary to do the transactional memory hypervisor assist at level 0, that is, when we are in hypervisor mode. Nested hypervisors can just use the TM facilities as architected. Therefore we should clear the CPU_FTR_P9_TM_HV_ASSIST bit when we are not in hypervisor mode, along with the CPU_FTR_HVMODE bit. Doing this will not change anything at this stage because the only code that tests CPU_FTR_P9_TM_HV_ASSIST is in HV KVM, which currently can only be used when when CPU_FTR_HVMODE is set. Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/cpu_setup_power.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 458b928dbd84..c317080db771 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -147,8 +147,8 @@ __init_hvmode_206: rldicl. r0,r3,4,63 bnelr ld r5,CPU_SPEC_FEATURES(r4) - LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE) - xor r5,r5,r6 + LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST) + andc r5,r5,r6 std r5,CPU_SPEC_FEATURES(r4) blr From d24ea8a7336a2c392728e2cf909d607a680feb7b Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:30:48 +1100 Subject: [PATCH 063/204] KVM: PPC: Book3S: Simplify external interrupt handling Currently we use two bits in the vcpu pending_exceptions bitmap to indicate that an external interrupt is pending for the guest, one for "one-shot" interrupts that are cleared when delivered, and one for interrupts that persist until cleared by an explicit action of the OS (e.g. an acknowledge to an interrupt controller). The BOOK3S_IRQPRIO_EXTERNAL bit is used for one-shot interrupt requests and BOOK3S_IRQPRIO_EXTERNAL_LEVEL is used for persisting interrupts. 
In practice BOOK3S_IRQPRIO_EXTERNAL never gets used, because our Book3S platforms generally, and pseries in particular, expect external interrupt requests to persist until they are acknowledged at the interrupt controller. That combined with the confusion introduced by having two bits for what is essentially the same thing makes it attractive to simplify things by only using one bit. This patch does that. With this patch there is only BOOK3S_IRQPRIO_EXTERNAL, and by default it has the semantics of a persisting interrupt. In order to avoid breaking the ABI, we introduce a new "external_oneshot" flag which preserves the behaviour of the KVM_INTERRUPT ioctl with the KVM_INTERRUPT_SET argument. Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_asm.h | 4 +- arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/kvm/book3s.c | 43 ++++++++++++++----- arch/powerpc/kvm/book3s_hv_rm_xics.c | 5 +-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 4 +- arch/powerpc/kvm/book3s_pr.c | 1 - arch/powerpc/kvm/book3s_xics.c | 11 ++--- arch/powerpc/kvm/book3s_xive_template.c | 2 +- arch/powerpc/kvm/trace_book3s.h | 1 - .../perf/arch/powerpc/util/book3s_hv_exits.h | 1 - 10 files changed, 44 insertions(+), 29 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index a790d5cf6ea3..1f321914676d 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -84,7 +84,6 @@ #define BOOK3S_INTERRUPT_INST_STORAGE 0x400 #define BOOK3S_INTERRUPT_INST_SEGMENT 0x480 #define BOOK3S_INTERRUPT_EXTERNAL 0x500 -#define BOOK3S_INTERRUPT_EXTERNAL_LEVEL 0x501 #define BOOK3S_INTERRUPT_EXTERNAL_HV 0x502 #define BOOK3S_INTERRUPT_ALIGNMENT 0x600 #define BOOK3S_INTERRUPT_PROGRAM 0x700 @@ -134,8 +133,7 @@ #define BOOK3S_IRQPRIO_EXTERNAL 14 #define BOOK3S_IRQPRIO_DECREMENTER 15 #define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 16 -#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL 17 -#define BOOK3S_IRQPRIO_MAX 18 +#define BOOK3S_IRQPRIO_MAX 17 #define BOOK3S_HFLAG_DCBZ32 0x1 #define BOOK3S_HFLAG_SLB 0x2 diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 906bcbdfd2a1..3cd0b9f45c2a 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -707,6 +707,7 @@ struct kvm_vcpu_arch { u8 hcall_needed; u8 epr_flags; /* KVMPPC_EPR_xxx */ u8 epr_needed; + u8 external_oneshot; /* clear external irq after delivery */ u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 87348e498c89..66a55218e8dd 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -150,7 +150,6 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec) case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break; case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break; case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break; - case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break; case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break; case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break; case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break; @@ -236,18 +235,35 @@ EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec); void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { - unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL; + /* + * This case (KVM_INTERRUPT_SET) should never actually arise for + * a pseries guest (because pseries guests expect their interrupt + * controllers to continue asserting an 
external interrupt request + * until it is acknowledged at the interrupt controller), but is + * included to avoid ABI breakage and potentially for other + * sorts of guest. + * + * There is a subtlety here: HV KVM does not test the + * external_oneshot flag in the code that synthesizes + * external interrupts for the guest just before entering + * the guest. That is OK even if userspace did do a + * KVM_INTERRUPT_SET on a pseries guest vcpu, because the + * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick() + * which ends up doing a smp_send_reschedule(), which will + * pull the guest all the way out to the host, meaning that + * we will call kvmppc_core_prepare_to_enter() before entering + * the guest again, and that will handle the external_oneshot + * flag correctly. + */ + if (irq->irq == KVM_INTERRUPT_SET) + vcpu->arch.external_oneshot = 1; - if (irq->irq == KVM_INTERRUPT_SET_LEVEL) - vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL; - - kvmppc_book3s_queue_irqprio(vcpu, vec); + kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); } void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu) { kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); - kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL); } void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar, @@ -278,7 +294,6 @@ static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, vec = BOOK3S_INTERRUPT_DECREMENTER; break; case BOOK3S_IRQPRIO_EXTERNAL: - case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; vec = BOOK3S_INTERRUPT_EXTERNAL; break; @@ -352,8 +367,16 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority) case BOOK3S_IRQPRIO_DECREMENTER: /* DEC interrupts get cleared by mtdec */ return false; - case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: - /* External interrupts get cleared by userspace */ + case BOOK3S_IRQPRIO_EXTERNAL: + /* + * External interrupts get cleared by userspace + * except when set by the KVM_INTERRUPT ioctl with + * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL). + */ + if (vcpu->arch.external_oneshot) { + vcpu->arch.external_oneshot = 0; + return true; + } return false; } diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 758d1d23215e..8b9f35689648 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -136,7 +136,7 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, /* Mark the target VCPU as having an interrupt pending */ vcpu->stat.queue_intr++; - set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); + set_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); /* Kick self ? Just set MER and return */ if (vcpu == this_vcpu) { @@ -170,8 +170,7 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu) { /* Note: Only called on self ! 
*/ - clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, - &vcpu->arch.pending_exceptions); + clear_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER); } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 1d14046124a0..77960e68f7b0 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1122,11 +1122,11 @@ kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */ /* Check if we can deliver an external or decrementer interrupt now */ ld r0, VCPU_PENDING_EXC(r4) - rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63 + rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL, 63 cmpdi cr1, r0, 0 andi. r8, r11, MSR_EE mfspr r8, SPRN_LPCR - /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */ + /* Insert EXTERNAL bit into LPCR at the MER bit position */ rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH mtspr SPRN_LPCR, r8 isync diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 614ebb4261f7..059683e4e67a 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -1246,7 +1246,6 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_EXTERNAL: - case BOOK3S_INTERRUPT_EXTERNAL_LEVEL: case BOOK3S_INTERRUPT_EXTERNAL_HV: case BOOK3S_INTERRUPT_H_VIRT: vcpu->stat.ext_intr_exits++; diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index b8356cdc0c04..d9ba1b06d0f5 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -310,7 +310,7 @@ static inline bool icp_try_update(struct kvmppc_icp *icp, */ if (new.out_ee) { kvmppc_book3s_queue_irqprio(icp->vcpu, - BOOK3S_INTERRUPT_EXTERNAL_LEVEL); + BOOK3S_INTERRUPT_EXTERNAL); if (!change_self) kvmppc_fast_vcpu_kick(icp->vcpu); } @@ -593,8 +593,7 @@ static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu) u32 xirr; /* First, remove EE from the processor */ - kvmppc_book3s_dequeue_irqprio(icp->vcpu, - BOOK3S_INTERRUPT_EXTERNAL_LEVEL); + kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL); /* * ICP State: Accept_Interrupt @@ -754,8 +753,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) * We can remove EE from the current processor, the update * transaction will set it again if needed */ - kvmppc_book3s_dequeue_irqprio(icp->vcpu, - BOOK3S_INTERRUPT_EXTERNAL_LEVEL); + kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL); do { old_state = new_state = READ_ONCE(icp->state); @@ -1167,8 +1165,7 @@ int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval) * Deassert the CPU interrupt request. * icp_try_update will reassert it if necessary. */ - kvmppc_book3s_dequeue_irqprio(icp->vcpu, - BOOK3S_INTERRUPT_EXTERNAL_LEVEL); + kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL); /* * Note that if we displace an interrupt from old_state.xisr, diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index 4171ede8722b..203ea654c81e 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c @@ -285,7 +285,7 @@ X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu) * set by pull or an escalation interrupts). 
*/ if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions)) - clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, + clear_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n", diff --git a/arch/powerpc/kvm/trace_book3s.h b/arch/powerpc/kvm/trace_book3s.h index f3b23759e017..372a82fa2de3 100644 --- a/arch/powerpc/kvm/trace_book3s.h +++ b/arch/powerpc/kvm/trace_book3s.h @@ -14,7 +14,6 @@ {0x400, "INST_STORAGE"}, \ {0x480, "INST_SEGMENT"}, \ {0x500, "EXTERNAL"}, \ - {0x501, "EXTERNAL_LEVEL"}, \ {0x502, "EXTERNAL_HV"}, \ {0x600, "ALIGNMENT"}, \ {0x700, "PROGRAM"}, \ diff --git a/tools/perf/arch/powerpc/util/book3s_hv_exits.h b/tools/perf/arch/powerpc/util/book3s_hv_exits.h index 853b95d1e139..2011376c7ab5 100644 --- a/tools/perf/arch/powerpc/util/book3s_hv_exits.h +++ b/tools/perf/arch/powerpc/util/book3s_hv_exits.h @@ -15,7 +15,6 @@ {0x400, "INST_STORAGE"}, \ {0x480, "INST_SEGMENT"}, \ {0x500, "EXTERNAL"}, \ - {0x501, "EXTERNAL_LEVEL"}, \ {0x502, "EXTERNAL_HV"}, \ {0x600, "ALIGNMENT"}, \ {0x700, "PROGRAM"}, \ From 966eba9316d429404bdc9a0b8cf45b4d6dd46e8f Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:30:49 +1100 Subject: [PATCH 064/204] KVM: PPC: Book3S HV: Remove left-over code in XICS-on-XIVE emulation This removes code that clears the external interrupt pending bit in the pending_exceptions bitmap. This is left over from an earlier iteration of the code where this bit was set when an escalation interrupt arrived in order to wake the vcpu from cede. Currently we set the vcpu->arch.irq_pending flag instead for this purpose. Therefore there is no need to do anything with the pending_exceptions bitmap. Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_xive_template.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index 203ea654c81e..033363d6e764 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c @@ -280,14 +280,6 @@ X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu) /* First collect pending bits from HW */ GLUE(X_PFX,ack_pending)(xc); - /* - * Cleanup the old-style bits if needed (they may have been - * set by pull or an escalation interrupts). - */ - if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions)) - clear_bit(BOOK3S_IRQPRIO_EXTERNAL, - &vcpu->arch.pending_exceptions); - pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n", xc->pending, xc->hw_cppr, xc->cppr); From f7035ce9f1dfb1042c4acedf5cca6f9af395f110 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:30:50 +1100 Subject: [PATCH 065/204] KVM: PPC: Book3S HV: Move interrupt delivery on guest entry to C code This is based on a patch by Suraj Jitindar Singh. This moves the code in book3s_hv_rmhandlers.S that generates an external, decrementer or privileged doorbell interrupt just before entering the guest to C code in book3s_hv_builtin.c. This is to make future maintenance and modification easier. The algorithm expressed in the C code is almost identical to the previous algorithm. 
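For readers comparing the removed assembler with the new C code, the vector
selection can be reduced to the following standalone toy model. This is a
deliberately simplified sketch with an invented helper name, not the function
added by the diff below: the real kvmppc_guest_entry_inject_int() additionally
sets LPCR[MER] so that a masked external request is delivered once the guest
re-enables interrupts, fills in SRR0/SRR1 and the PC when it injects, and
propagates a pending doorbell through DPDES.

  #include <stdbool.h>
  #include <stdio.h>

  #define VEC_NONE         0x000
  #define VEC_EXTERNAL     0x500   /* BOOK3S_INTERRUPT_EXTERNAL */
  #define VEC_DECREMENTER  0x900   /* BOOK3S_INTERRUPT_DECREMENTER */

  /* Decide which interrupt, if any, to synthesize on guest entry. */
  static int pick_vector(bool ext_pending, bool msr_ee, bool dec_expired)
  {
          if (!msr_ee)
                  return VEC_NONE;         /* guest has interrupts disabled */
          if (ext_pending)
                  return VEC_EXTERNAL;     /* external wins over the decrementer */
          if (dec_expired)
                  return VEC_DECREMENTER;
          return VEC_NONE;
  }

  int main(void)
  {
          printf("0x%x\n", pick_vector(true, true, true));   /* prints 0x500 */
          printf("0x%x\n", pick_vector(false, true, true));  /* prints 0x900 */
          return 0;
  }
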
Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_ppc.h | 1 + arch/powerpc/kvm/book3s_hv.c | 3 +- arch/powerpc/kvm/book3s_hv_builtin.c | 48 +++++++++++++++++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 70 ++++++------------------- 4 files changed, 67 insertions(+), 55 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 38d03282bb6a..03a60f76f3d7 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -650,6 +650,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, unsigned long mfrr); int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr); int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr); +void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu); /* * Host-side operations we want to set up while running in real diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 3e3a71594e63..49a686c1185c 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -730,8 +730,7 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu) /* * Ensure that the read of vcore->dpdes comes after the read * of vcpu->doorbell_request. This barrier matches the - * lwsync in book3s_hv_rmhandlers.S just before the - * fast_guest_return label. + * smb_wmb() in kvmppc_guest_entry_inject(). */ smp_rmb(); vc = vcpu->arch.vcore; diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index fc6bb9630a9c..ccfea5b96d89 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -729,3 +729,51 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip) smp_mb(); local_paca->kvm_hstate.kvm_split_mode = NULL; } + +/* + * Is there a PRIV_DOORBELL pending for the guest (on POWER9)? + * Can we inject a Decrementer or a External interrupt? 
+ */ +void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu) +{ + int ext; + unsigned long vec = 0; + unsigned long lpcr; + + /* Insert EXTERNAL bit into LPCR at the MER bit position */ + ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1; + lpcr = mfspr(SPRN_LPCR); + lpcr |= ext << LPCR_MER_SH; + mtspr(SPRN_LPCR, lpcr); + isync(); + + if (vcpu->arch.shregs.msr & MSR_EE) { + if (ext) { + vec = BOOK3S_INTERRUPT_EXTERNAL; + } else { + long int dec = mfspr(SPRN_DEC); + if (!(lpcr & LPCR_LD)) + dec = (int) dec; + if (dec < 0) + vec = BOOK3S_INTERRUPT_DECREMENTER; + } + } + if (vec) { + unsigned long msr, old_msr = vcpu->arch.shregs.msr; + + kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); + kvmppc_set_srr1(vcpu, old_msr); + kvmppc_set_pc(vcpu, vec); + msr = vcpu->arch.intr_msr; + if (MSR_TM_ACTIVE(old_msr)) + msr |= MSR_TS_S; + vcpu->arch.shregs.msr = msr; + } + + if (vcpu->arch.doorbell_request) { + mtspr(SPRN_DPDES, 1); + vcpu->arch.vcore->dpdes = 1; + smp_wmb(); + vcpu->arch.doorbell_request = 0; + } +} diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 77960e68f7b0..6752da132ba6 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1101,13 +1101,20 @@ no_xive: #endif /* CONFIG_KVM_XICS */ deliver_guest_interrupt: - ld r6, VCPU_CTR(r4) - ld r7, VCPU_XER(r4) - - mtctr r6 - mtxer r7 - kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */ + /* Check if we can deliver an external or decrementer interrupt now */ + ld r0, VCPU_PENDING_EXC(r4) +BEGIN_FTR_SECTION + /* On POWER9, also check for emulated doorbell interrupt */ + lbz r3, VCPU_DBELL_REQ(r4) + or r0, r0, r3 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + cmpdi r0, 0 + beq 71f + mr r3, r4 + bl kvmppc_guest_entry_inject_int + ld r4, HSTATE_KVM_VCPU(r13) +71: ld r10, VCPU_PC(r4) ld r11, VCPU_MSR(r4) ld r6, VCPU_SRR0(r4) @@ -1120,53 +1127,10 @@ kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */ rotldi r11, r11, 1 + MSR_HV_LG ori r11, r11, MSR_ME - /* Check if we can deliver an external or decrementer interrupt now */ - ld r0, VCPU_PENDING_EXC(r4) - rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL, 63 - cmpdi cr1, r0, 0 - andi. r8, r11, MSR_EE - mfspr r8, SPRN_LPCR - /* Insert EXTERNAL bit into LPCR at the MER bit position */ - rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH - mtspr SPRN_LPCR, r8 - isync - beq 5f - li r0, BOOK3S_INTERRUPT_EXTERNAL - bne cr1, 12f - mfspr r0, SPRN_DEC -BEGIN_FTR_SECTION - /* On POWER9 check whether the guest has large decrementer enabled */ - andis. 
r8, r8, LPCR_LD@h - bne 15f -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) - extsw r0, r0 -15: cmpdi r0, 0 - li r0, BOOK3S_INTERRUPT_DECREMENTER - bge 5f - -12: mtspr SPRN_SRR0, r10 - mr r10,r0 - mtspr SPRN_SRR1, r11 - mr r9, r4 - bl kvmppc_msr_interrupt -5: -BEGIN_FTR_SECTION - b fast_guest_return -END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) - /* On POWER9, check for pending doorbell requests */ - lbz r0, VCPU_DBELL_REQ(r4) - cmpwi r0, 0 - beq fast_guest_return - ld r5, HSTATE_KVM_VCORE(r13) - /* Set DPDES register so the CPU will take a doorbell interrupt */ - li r0, 1 - mtspr SPRN_DPDES, r0 - std r0, VCORE_DPDES(r5) - /* Make sure other cpus see vcore->dpdes set before dbell req clear */ - lwsync - /* Clear the pending doorbell request */ - li r0, 0 - stb r0, VCPU_DBELL_REQ(r4) + ld r6, VCPU_CTR(r4) + ld r7, VCPU_XER(r4) + mtctr r6 + mtxer r7 /* * Required state: From 41f4e631daf80d16d818ac17c4144bd5b9a11f33 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:30:51 +1100 Subject: [PATCH 066/204] KVM: PPC: Book3S HV: Extract PMU save/restore operations as C-callable functions This pulls out the assembler code that is responsible for saving and restoring the PMU state for the host and guest into separate functions so they can be used from an alternate entry path. The calling convention is made compatible with C. Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Reviewed-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/asm-prototypes.h | 5 + arch/powerpc/kvm/book3s_hv_interrupts.S | 95 +++--- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 363 ++++++++++++---------- 3 files changed, 253 insertions(+), 210 deletions(-) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 1f4691ce4126..024e8fc905d7 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -150,4 +150,9 @@ extern s32 patch__memset_nocache, patch__memcpy_nocache; extern long flush_count_cache; +void kvmhv_save_host_pmu(void); +void kvmhv_load_host_pmu(void); +void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use); +void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu); + #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 666b91c79eb4..a6d10010d9e8 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S @@ -64,52 +64,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) /* Save host PMU registers */ -BEGIN_FTR_SECTION - /* Work around P8 PMAE bug */ - li r3, -1 - clrrdi r3, r3, 10 - mfspr r8, SPRN_MMCR2 - mtspr SPRN_MMCR2, r3 /* freeze all counters using MMCR2 */ - isync -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - li r3, 1 - sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ - mfspr r7, SPRN_MMCR0 /* save MMCR0 */ - mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */ - mfspr r6, SPRN_MMCRA - /* Clear MMCRA in order to disable SDAR updates */ - li r5, 0 - mtspr SPRN_MMCRA, r5 - isync - lbz r5, PACA_PMCINUSE(r13) /* is the host using the PMU? 
*/ - cmpwi r5, 0 - beq 31f /* skip if not */ - mfspr r5, SPRN_MMCR1 - mfspr r9, SPRN_SIAR - mfspr r10, SPRN_SDAR - std r7, HSTATE_MMCR0(r13) - std r5, HSTATE_MMCR1(r13) - std r6, HSTATE_MMCRA(r13) - std r9, HSTATE_SIAR(r13) - std r10, HSTATE_SDAR(r13) -BEGIN_FTR_SECTION - mfspr r9, SPRN_SIER - std r8, HSTATE_MMCR2(r13) - std r9, HSTATE_SIER(r13) -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - mfspr r3, SPRN_PMC1 - mfspr r5, SPRN_PMC2 - mfspr r6, SPRN_PMC3 - mfspr r7, SPRN_PMC4 - mfspr r8, SPRN_PMC5 - mfspr r9, SPRN_PMC6 - stw r3, HSTATE_PMC1(r13) - stw r5, HSTATE_PMC2(r13) - stw r6, HSTATE_PMC3(r13) - stw r7, HSTATE_PMC4(r13) - stw r8, HSTATE_PMC5(r13) - stw r9, HSTATE_PMC6(r13) -31: + bl kvmhv_save_host_pmu /* * Put whatever is in the decrementer into the @@ -161,3 +116,51 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ld r0, PPC_LR_STKOFF(r1) mtlr r0 blr + +_GLOBAL(kvmhv_save_host_pmu) +BEGIN_FTR_SECTION + /* Work around P8 PMAE bug */ + li r3, -1 + clrrdi r3, r3, 10 + mfspr r8, SPRN_MMCR2 + mtspr SPRN_MMCR2, r3 /* freeze all counters using MMCR2 */ + isync +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + li r3, 1 + sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ + mfspr r7, SPRN_MMCR0 /* save MMCR0 */ + mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */ + mfspr r6, SPRN_MMCRA + /* Clear MMCRA in order to disable SDAR updates */ + li r5, 0 + mtspr SPRN_MMCRA, r5 + isync + lbz r5, PACA_PMCINUSE(r13) /* is the host using the PMU? */ + cmpwi r5, 0 + beq 31f /* skip if not */ + mfspr r5, SPRN_MMCR1 + mfspr r9, SPRN_SIAR + mfspr r10, SPRN_SDAR + std r7, HSTATE_MMCR0(r13) + std r5, HSTATE_MMCR1(r13) + std r6, HSTATE_MMCRA(r13) + std r9, HSTATE_SIAR(r13) + std r10, HSTATE_SDAR(r13) +BEGIN_FTR_SECTION + mfspr r9, SPRN_SIER + std r8, HSTATE_MMCR2(r13) + std r9, HSTATE_SIER(r13) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + mfspr r3, SPRN_PMC1 + mfspr r5, SPRN_PMC2 + mfspr r6, SPRN_PMC3 + mfspr r7, SPRN_PMC4 + mfspr r8, SPRN_PMC5 + mfspr r9, SPRN_PMC6 + stw r3, HSTATE_PMC1(r13) + stw r5, HSTATE_PMC2(r13) + stw r6, HSTATE_PMC3(r13) + stw r7, HSTATE_PMC4(r13) + stw r8, HSTATE_PMC5(r13) + stw r9, HSTATE_PMC6(r13) +31: blr diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 6752da132ba6..5b2ae34bc04b 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -113,45 +114,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) mtspr SPRN_SPRG_VDSO_WRITE,r3 /* Reload the host's PMU registers */ - lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */ - cmpwi r4, 0 - beq 23f /* skip if not */ -BEGIN_FTR_SECTION - ld r3, HSTATE_MMCR0(r13) - andi. 
r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO - cmpwi r4, MMCR0_PMAO - beql kvmppc_fix_pmao -END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) - lwz r3, HSTATE_PMC1(r13) - lwz r4, HSTATE_PMC2(r13) - lwz r5, HSTATE_PMC3(r13) - lwz r6, HSTATE_PMC4(r13) - lwz r8, HSTATE_PMC5(r13) - lwz r9, HSTATE_PMC6(r13) - mtspr SPRN_PMC1, r3 - mtspr SPRN_PMC2, r4 - mtspr SPRN_PMC3, r5 - mtspr SPRN_PMC4, r6 - mtspr SPRN_PMC5, r8 - mtspr SPRN_PMC6, r9 - ld r3, HSTATE_MMCR0(r13) - ld r4, HSTATE_MMCR1(r13) - ld r5, HSTATE_MMCRA(r13) - ld r6, HSTATE_SIAR(r13) - ld r7, HSTATE_SDAR(r13) - mtspr SPRN_MMCR1, r4 - mtspr SPRN_MMCRA, r5 - mtspr SPRN_SIAR, r6 - mtspr SPRN_SDAR, r7 -BEGIN_FTR_SECTION - ld r8, HSTATE_MMCR2(r13) - ld r9, HSTATE_SIER(r13) - mtspr SPRN_MMCR2, r8 - mtspr SPRN_SIER, r9 -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - mtspr SPRN_MMCR0, r3 - isync -23: + bl kvmhv_load_host_pmu /* * Reload DEC. HDEC interrupts were disabled when @@ -805,57 +768,12 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 91: #endif - /* Load guest PMU registers */ - /* R4 is live here (vcpu pointer) */ - li r3, 1 - sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ - mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ - isync -BEGIN_FTR_SECTION - ld r3, VCPU_MMCR(r4) - andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO - cmpwi r5, MMCR0_PMAO - beql kvmppc_fix_pmao -END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) - lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ - lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ - lwz r6, VCPU_PMC + 8(r4) - lwz r7, VCPU_PMC + 12(r4) - lwz r8, VCPU_PMC + 16(r4) - lwz r9, VCPU_PMC + 20(r4) - mtspr SPRN_PMC1, r3 - mtspr SPRN_PMC2, r5 - mtspr SPRN_PMC3, r6 - mtspr SPRN_PMC4, r7 - mtspr SPRN_PMC5, r8 - mtspr SPRN_PMC6, r9 - ld r3, VCPU_MMCR(r4) - ld r5, VCPU_MMCR + 8(r4) - ld r6, VCPU_MMCR + 16(r4) - ld r7, VCPU_SIAR(r4) - ld r8, VCPU_SDAR(r4) - mtspr SPRN_MMCR1, r5 - mtspr SPRN_MMCRA, r6 - mtspr SPRN_SIAR, r7 - mtspr SPRN_SDAR, r8 -BEGIN_FTR_SECTION - ld r5, VCPU_MMCR + 24(r4) - ld r6, VCPU_SIER(r4) - mtspr SPRN_MMCR2, r5 - mtspr SPRN_SIER, r6 -BEGIN_FTR_SECTION_NESTED(96) - lwz r7, VCPU_PMC + 24(r4) - lwz r8, VCPU_PMC + 28(r4) - ld r9, VCPU_MMCR + 32(r4) - mtspr SPRN_SPMC1, r7 - mtspr SPRN_SPMC2, r8 - mtspr SPRN_MMCRS, r9 -END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - mtspr SPRN_MMCR0, r3 - isync + /* Load guest PMU registers; r4 = vcpu pointer here */ + mr r3, r4 + bl kvmhv_load_guest_pmu /* Load up FP, VMX and VSX registers */ + ld r4, HSTATE_KVM_VCPU(r13) bl kvmppc_load_fp ld r14, VCPU_GPR(R14)(r4) @@ -1766,83 +1684,12 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 25: /* Save PMU registers if requested */ /* r8 and cr0.eq are live here */ -BEGIN_FTR_SECTION - /* - * POWER8 seems to have a hardware bug where setting - * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE] - * when some counters are already negative doesn't seem - * to cause a performance monitor alert (and hence interrupt). - * The effect of this is that when saving the PMU state, - * if there is no PMU alert pending when we read MMCR0 - * before freezing the counters, but one becomes pending - * before we read the counters, we lose it. - * To work around this, we need a way to freeze the counters - * before reading MMCR0. Normally, freezing the counters - * is done by writing MMCR0 (to set MMCR0[FC]) which - * unavoidably writes MMCR0[PMA0] as well. 
On POWER8, - * we can also freeze the counters using MMCR2, by writing - * 1s to all the counter freeze condition bits (there are - * 9 bits each for 6 counters). - */ - li r3, -1 /* set all freeze bits */ - clrrdi r3, r3, 10 - mfspr r10, SPRN_MMCR2 - mtspr SPRN_MMCR2, r3 - isync -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - li r3, 1 - sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ - mfspr r4, SPRN_MMCR0 /* save MMCR0 */ - mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ - mfspr r6, SPRN_MMCRA - /* Clear MMCRA in order to disable SDAR updates */ - li r7, 0 - mtspr SPRN_MMCRA, r7 - isync + mr r3, r9 + li r4, 1 beq 21f /* if no VPA, save PMU stuff anyway */ - lbz r7, LPPACA_PMCINUSE(r8) - cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */ - bne 21f - std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ - b 22f -21: mfspr r5, SPRN_MMCR1 - mfspr r7, SPRN_SIAR - mfspr r8, SPRN_SDAR - std r4, VCPU_MMCR(r9) - std r5, VCPU_MMCR + 8(r9) - std r6, VCPU_MMCR + 16(r9) -BEGIN_FTR_SECTION - std r10, VCPU_MMCR + 24(r9) -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - std r7, VCPU_SIAR(r9) - std r8, VCPU_SDAR(r9) - mfspr r3, SPRN_PMC1 - mfspr r4, SPRN_PMC2 - mfspr r5, SPRN_PMC3 - mfspr r6, SPRN_PMC4 - mfspr r7, SPRN_PMC5 - mfspr r8, SPRN_PMC6 - stw r3, VCPU_PMC(r9) - stw r4, VCPU_PMC + 4(r9) - stw r5, VCPU_PMC + 8(r9) - stw r6, VCPU_PMC + 12(r9) - stw r7, VCPU_PMC + 16(r9) - stw r8, VCPU_PMC + 20(r9) -BEGIN_FTR_SECTION - mfspr r5, SPRN_SIER - std r5, VCPU_SIER(r9) -BEGIN_FTR_SECTION_NESTED(96) - mfspr r6, SPRN_SPMC1 - mfspr r7, SPRN_SPMC2 - mfspr r8, SPRN_MMCRS - stw r6, VCPU_PMC + 24(r9) - stw r7, VCPU_PMC + 28(r9) - std r8, VCPU_MMCR + 32(r9) - lis r4, 0x8000 - mtspr SPRN_MMCRS, r4 -END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) -22: + lbz r4, LPPACA_PMCINUSE(r8) +21: bl kvmhv_save_guest_pmu + ld r9, HSTATE_KVM_VCPU(r13) /* Restore host values of some registers */ BEGIN_FTR_SECTION @@ -3387,6 +3234,194 @@ kvmppc_msr_interrupt: 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG blr +/* + * Load up guest PMU state. R3 points to the vcpu struct. + */ +_GLOBAL(kvmhv_load_guest_pmu) +EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu) + mr r4, r3 + mflr r0 + li r3, 1 + sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ + mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ + isync +BEGIN_FTR_SECTION + ld r3, VCPU_MMCR(r4) + andi. 
r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO + cmpwi r5, MMCR0_PMAO + beql kvmppc_fix_pmao +END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) + lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ + lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ + lwz r6, VCPU_PMC + 8(r4) + lwz r7, VCPU_PMC + 12(r4) + lwz r8, VCPU_PMC + 16(r4) + lwz r9, VCPU_PMC + 20(r4) + mtspr SPRN_PMC1, r3 + mtspr SPRN_PMC2, r5 + mtspr SPRN_PMC3, r6 + mtspr SPRN_PMC4, r7 + mtspr SPRN_PMC5, r8 + mtspr SPRN_PMC6, r9 + ld r3, VCPU_MMCR(r4) + ld r5, VCPU_MMCR + 8(r4) + ld r6, VCPU_MMCR + 16(r4) + ld r7, VCPU_SIAR(r4) + ld r8, VCPU_SDAR(r4) + mtspr SPRN_MMCR1, r5 + mtspr SPRN_MMCRA, r6 + mtspr SPRN_SIAR, r7 + mtspr SPRN_SDAR, r8 +BEGIN_FTR_SECTION + ld r5, VCPU_MMCR + 24(r4) + ld r6, VCPU_SIER(r4) + mtspr SPRN_MMCR2, r5 + mtspr SPRN_SIER, r6 +BEGIN_FTR_SECTION_NESTED(96) + lwz r7, VCPU_PMC + 24(r4) + lwz r8, VCPU_PMC + 28(r4) + ld r9, VCPU_MMCR + 32(r4) + mtspr SPRN_SPMC1, r7 + mtspr SPRN_SPMC2, r8 + mtspr SPRN_MMCRS, r9 +END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + mtspr SPRN_MMCR0, r3 + isync + mtlr r0 + blr + +/* + * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu. + */ +_GLOBAL(kvmhv_load_host_pmu) +EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu) + mflr r0 + lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */ + cmpwi r4, 0 + beq 23f /* skip if not */ +BEGIN_FTR_SECTION + ld r3, HSTATE_MMCR0(r13) + andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO + cmpwi r4, MMCR0_PMAO + beql kvmppc_fix_pmao +END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) + lwz r3, HSTATE_PMC1(r13) + lwz r4, HSTATE_PMC2(r13) + lwz r5, HSTATE_PMC3(r13) + lwz r6, HSTATE_PMC4(r13) + lwz r8, HSTATE_PMC5(r13) + lwz r9, HSTATE_PMC6(r13) + mtspr SPRN_PMC1, r3 + mtspr SPRN_PMC2, r4 + mtspr SPRN_PMC3, r5 + mtspr SPRN_PMC4, r6 + mtspr SPRN_PMC5, r8 + mtspr SPRN_PMC6, r9 + ld r3, HSTATE_MMCR0(r13) + ld r4, HSTATE_MMCR1(r13) + ld r5, HSTATE_MMCRA(r13) + ld r6, HSTATE_SIAR(r13) + ld r7, HSTATE_SDAR(r13) + mtspr SPRN_MMCR1, r4 + mtspr SPRN_MMCRA, r5 + mtspr SPRN_SIAR, r6 + mtspr SPRN_SDAR, r7 +BEGIN_FTR_SECTION + ld r8, HSTATE_MMCR2(r13) + ld r9, HSTATE_SIER(r13) + mtspr SPRN_MMCR2, r8 + mtspr SPRN_SIER, r9 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + mtspr SPRN_MMCR0, r3 + isync + mtlr r0 +23: blr + +/* + * Save guest PMU state into the vcpu struct. + * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA) + */ +_GLOBAL(kvmhv_save_guest_pmu) +EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu) + mr r9, r3 + mr r8, r4 +BEGIN_FTR_SECTION + /* + * POWER8 seems to have a hardware bug where setting + * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE] + * when some counters are already negative doesn't seem + * to cause a performance monitor alert (and hence interrupt). + * The effect of this is that when saving the PMU state, + * if there is no PMU alert pending when we read MMCR0 + * before freezing the counters, but one becomes pending + * before we read the counters, we lose it. + * To work around this, we need a way to freeze the counters + * before reading MMCR0. Normally, freezing the counters + * is done by writing MMCR0 (to set MMCR0[FC]) which + * unavoidably writes MMCR0[PMA0] as well. On POWER8, + * we can also freeze the counters using MMCR2, by writing + * 1s to all the counter freeze condition bits (there are + * 9 bits each for 6 counters). 
+ */ + li r3, -1 /* set all freeze bits */ + clrrdi r3, r3, 10 + mfspr r10, SPRN_MMCR2 + mtspr SPRN_MMCR2, r3 + isync +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + li r3, 1 + sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ + mfspr r4, SPRN_MMCR0 /* save MMCR0 */ + mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ + mfspr r6, SPRN_MMCRA + /* Clear MMCRA in order to disable SDAR updates */ + li r7, 0 + mtspr SPRN_MMCRA, r7 + isync + cmpwi r8, 0 /* did they ask for PMU stuff to be saved? */ + bne 21f + std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ + b 22f +21: mfspr r5, SPRN_MMCR1 + mfspr r7, SPRN_SIAR + mfspr r8, SPRN_SDAR + std r4, VCPU_MMCR(r9) + std r5, VCPU_MMCR + 8(r9) + std r6, VCPU_MMCR + 16(r9) +BEGIN_FTR_SECTION + std r10, VCPU_MMCR + 24(r9) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + std r7, VCPU_SIAR(r9) + std r8, VCPU_SDAR(r9) + mfspr r3, SPRN_PMC1 + mfspr r4, SPRN_PMC2 + mfspr r5, SPRN_PMC3 + mfspr r6, SPRN_PMC4 + mfspr r7, SPRN_PMC5 + mfspr r8, SPRN_PMC6 + stw r3, VCPU_PMC(r9) + stw r4, VCPU_PMC + 4(r9) + stw r5, VCPU_PMC + 8(r9) + stw r6, VCPU_PMC + 12(r9) + stw r7, VCPU_PMC + 16(r9) + stw r8, VCPU_PMC + 20(r9) +BEGIN_FTR_SECTION + mfspr r5, SPRN_SIER + std r5, VCPU_SIER(r9) +BEGIN_FTR_SECTION_NESTED(96) + mfspr r6, SPRN_SPMC1 + mfspr r7, SPRN_SPMC2 + mfspr r8, SPRN_MMCRS + stw r6, VCPU_PMC + 24(r9) + stw r7, VCPU_PMC + 28(r9) + std r8, VCPU_MMCR + 32(r9) + lis r4, 0x8000 + mtspr SPRN_MMCRS, r4 +END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) +22: blr + /* * This works around a hardware bug on POWER8E processors, where * writing a 1 to the MMCR0[PMAO] bit doesn't generate a From df709a296ef7c493a074609019b0e074cde5c0d0 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:30:52 +1100 Subject: [PATCH 067/204] KVM: PPC: Book3S HV: Simplify real-mode interrupt handling This streamlines the first part of the code that handles a hypervisor interrupt that occurred in the guest. With this, all of the real-mode handling that occurs is done before the "guest_exit_cont" label; once we get to that label we are committed to exiting to host virtual mode. Thus the machine check and HMI real-mode handling is moved before that label. Also, the code to handle external interrupts is moved out of line, as is the code that calls kvmppc_realmode_hmi_handler(). Signed-off-by: Paul Mackerras Reviewed-by: David Gibson Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_hv_ras.c | 8 + arch/powerpc/kvm/book3s_hv_rmhandlers.S | 222 ++++++++++++------------ 2 files changed, 120 insertions(+), 110 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c index b11043b23c18..ee564b682f0c 100644 --- a/arch/powerpc/kvm/book3s_hv_ras.c +++ b/arch/powerpc/kvm/book3s_hv_ras.c @@ -331,5 +331,13 @@ long kvmppc_realmode_hmi_handler(void) } else { wait_for_tb_resync(); } + + /* + * Reset tb_offset_applied so the guest exit code won't try + * to subtract the previous timebase offset from the timebase. 
+ */ + if (local_paca->kvm_hstate.kvm_vcore) + local_paca->kvm_hstate.kvm_vcore->tb_offset_applied = 0; + return 0; } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 5b2ae34bc04b..fc360b5ca9b1 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1018,8 +1018,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) no_xive: #endif /* CONFIG_KVM_XICS */ -deliver_guest_interrupt: -kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */ +deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */ /* Check if we can deliver an external or decrementer interrupt now */ ld r0, VCPU_PENDING_EXC(r4) BEGIN_FTR_SECTION @@ -1269,18 +1268,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) std r3, VCPU_CTR(r9) std r4, VCPU_XER(r9) + /* Save more register state */ + mfdar r3 + mfdsisr r4 + std r3, VCPU_DAR(r9) + stw r4, VCPU_DSISR(r9) + + /* If this is a page table miss then see if it's theirs or ours */ + cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE + beq kvmppc_hdsi + std r3, VCPU_FAULT_DAR(r9) + stw r4, VCPU_FAULT_DSISR(r9) + cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE + beq kvmppc_hisi + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* For softpatch interrupt, go off and do TM instruction emulation */ cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH beq kvmppc_tm_emul #endif - /* If this is a page table miss then see if it's theirs or ours */ - cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE - beq kvmppc_hdsi - cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE - beq kvmppc_hisi - /* See if this is a leftover HDEC interrupt */ cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER bne 2f @@ -1303,7 +1310,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) lbz r0, HSTATE_HOST_IPI(r13) cmpwi r0, 0 - beq 4f + beq maybe_reenter_guest b guest_exit_cont 3: /* If it's a hypervisor facility unavailable interrupt, save HFSCR */ @@ -1315,82 +1322,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 14: /* External interrupt ? */ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL - bne+ guest_exit_cont - - /* External interrupt, first check for host_ipi. If this is - * set, we know the host wants us out so let's do it now - */ - bl kvmppc_read_intr - - /* - * Restore the active volatile registers after returning from - * a C function. - */ - ld r9, HSTATE_KVM_VCPU(r13) - li r12, BOOK3S_INTERRUPT_EXTERNAL - - /* - * kvmppc_read_intr return codes: - * - * Exit to host (r3 > 0) - * 1 An interrupt is pending that needs to be handled by the host - * Exit guest and return to host by branching to guest_exit_cont - * - * 2 Passthrough that needs completion in the host - * Exit guest and return to host by branching to guest_exit_cont - * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD - * to indicate to the host to complete handling the interrupt - * - * Before returning to guest, we check if any CPU is heading out - * to the host and if so, we head out also. If no CPUs are heading - * check return values <= 0. - * - * Return to guest (r3 <= 0) - * 0 No external interrupt is pending - * -1 A guest wakeup IPI (which has now been cleared) - * In either case, we return to guest to deliver any pending - * guest interrupts. - * - * -2 A PCI passthrough external interrupt was handled - * (interrupt was delivered directly to guest) - * Return to guest to deliver any pending guest interrupts. 
- */ - - cmpdi r3, 1 - ble 1f - - /* Return code = 2 */ - li r12, BOOK3S_INTERRUPT_HV_RM_HARD - stw r12, VCPU_TRAP(r9) - b guest_exit_cont - -1: /* Return code <= 1 */ - cmpdi r3, 0 - bgt guest_exit_cont - - /* Return code <= 0 */ -4: ld r5, HSTATE_KVM_VCORE(r13) - lwz r0, VCORE_ENTRY_EXIT(r5) - cmpwi r0, 0x100 - mr r4, r9 - blt deliver_guest_interrupt - -guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ - /* Save more register state */ - mfdar r6 - mfdsisr r7 - std r6, VCPU_DAR(r9) - stw r7, VCPU_DSISR(r9) - /* don't overwrite fault_dar/fault_dsisr if HDSI */ - cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE - beq mc_cont - std r6, VCPU_FAULT_DAR(r9) - stw r7, VCPU_FAULT_DSISR(r9) - + beq kvmppc_guest_external /* See if it is a machine check */ cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK beq machine_check_realmode -mc_cont: + /* Or a hypervisor maintenance interrupt */ + cmpwi r12, BOOK3S_INTERRUPT_HMI + beq hmi_realmode + +guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ + #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING addi r3, r9, VCPU_TB_RMEXIT mr r4, r9 @@ -1821,24 +1762,6 @@ BEGIN_FTR_SECTION mtspr SPRN_DPDES, r8 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - /* If HMI, call kvmppc_realmode_hmi_handler() */ - lwz r12, STACK_SLOT_TRAP(r1) - cmpwi r12, BOOK3S_INTERRUPT_HMI - bne 27f - bl kvmppc_realmode_hmi_handler - nop - cmpdi r3, 0 - /* - * At this point kvmppc_realmode_hmi_handler may have resync-ed - * the TB, and if it has, we must not subtract the guest timebase - * offset from the timebase. So, skip it. - * - * Also, do not call kvmppc_subcore_exit_guest() because it has - * been invoked as part of kvmppc_realmode_hmi_handler(). - */ - beq 30f - -27: /* Subtract timebase offset from timebase */ ld r8, VCORE_TB_OFFSET_APPL(r5) cmpdi r8,0 @@ -1856,7 +1779,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) addis r8,r8,0x100 /* if so, increment upper 40 bits */ mtspr SPRN_TBU40,r8 -17: bl kvmppc_subcore_exit_guest +17: + /* + * If this is an HMI, we called kvmppc_realmode_hmi_handler + * above, which may or may not have already called + * kvmppc_subcore_exit_guest. Fortunately, all that + * kvmppc_subcore_exit_guest does is clear a flag, so calling + * it again here is benign even if kvmppc_realmode_hmi_handler + * has already called it. + */ + bl kvmppc_subcore_exit_guest nop 30: ld r5,HSTATE_KVM_VCORE(r13) ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ @@ -1910,6 +1842,67 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) mtlr r0 blr +kvmppc_guest_external: + /* External interrupt, first check for host_ipi. If this is + * set, we know the host wants us out so let's do it now + */ + bl kvmppc_read_intr + + /* + * Restore the active volatile registers after returning from + * a C function. + */ + ld r9, HSTATE_KVM_VCPU(r13) + li r12, BOOK3S_INTERRUPT_EXTERNAL + + /* + * kvmppc_read_intr return codes: + * + * Exit to host (r3 > 0) + * 1 An interrupt is pending that needs to be handled by the host + * Exit guest and return to host by branching to guest_exit_cont + * + * 2 Passthrough that needs completion in the host + * Exit guest and return to host by branching to guest_exit_cont + * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD + * to indicate to the host to complete handling the interrupt + * + * Before returning to guest, we check if any CPU is heading out + * to the host and if so, we head out also. If no CPUs are heading + * check return values <= 0. 
+ * + * Return to guest (r3 <= 0) + * 0 No external interrupt is pending + * -1 A guest wakeup IPI (which has now been cleared) + * In either case, we return to guest to deliver any pending + * guest interrupts. + * + * -2 A PCI passthrough external interrupt was handled + * (interrupt was delivered directly to guest) + * Return to guest to deliver any pending guest interrupts. + */ + + cmpdi r3, 1 + ble 1f + + /* Return code = 2 */ + li r12, BOOK3S_INTERRUPT_HV_RM_HARD + stw r12, VCPU_TRAP(r9) + b guest_exit_cont + +1: /* Return code <= 1 */ + cmpdi r3, 0 + bgt guest_exit_cont + + /* Return code <= 0 */ +maybe_reenter_guest: + ld r5, HSTATE_KVM_VCORE(r13) + lwz r0, VCORE_ENTRY_EXIT(r5) + cmpwi r0, 0x100 + mr r4, r9 + blt deliver_guest_interrupt + b guest_exit_cont + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Softpatch interrupt for transactional memory emulation cases @@ -2685,13 +2678,7 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) mr r9, r4 cmpdi r3, 0 bgt guest_exit_cont - - /* see if any other thread is already exiting */ - lwz r0,VCORE_ENTRY_EXIT(r5) - cmpwi r0,0x100 - bge guest_exit_cont - - b kvmppc_cede_reentry /* if not go back to guest */ + b maybe_reenter_guest /* cede when already previously prodded case */ kvm_cede_prodded: @@ -2758,12 +2745,12 @@ machine_check_realmode: */ ld r11, VCPU_MSR(r9) rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */ - bne mc_cont /* if so, exit to host */ + bne guest_exit_cont /* if so, exit to host */ /* Check if guest is capable of handling NMI exit */ ld r10, VCPU_KVM(r9) lbz r10, KVM_FWNMI(r10) cmpdi r10, 1 /* FWNMI capable? */ - beq mc_cont /* if so, exit with KVM_EXIT_NMI. */ + beq guest_exit_cont /* if so, exit with KVM_EXIT_NMI. */ /* if not, fall through for backward compatibility. */ andi. r10, r11, MSR_RI /* check for unrecoverable exception */ @@ -2776,6 +2763,21 @@ machine_check_realmode: bl kvmppc_msr_interrupt 2: b fast_interrupt_c_return +/* + * Call C code to handle a HMI in real mode. + * Only the primary thread does the call, secondary threads are handled + * by calling hmi_exception_realmode() after kvmppc_hv_entry returns. + * r9 points to the vcpu on entry + */ +hmi_realmode: + lbz r0, HSTATE_PTID(r13) + cmpwi r0, 0 + bne guest_exit_cont + bl kvmppc_realmode_hmi_handler + ld r9, HSTATE_KVM_VCPU(r13) + li r12, BOOK3S_INTERRUPT_HMI + b guest_exit_cont + /* * Check the reason we woke from nap, and take appropriate action. * Returns (in r3): From 7854f7545bff69567fc3fcd8831752138b892483 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:30:53 +1100 Subject: [PATCH 068/204] KVM: PPC: Book3S: Rework TM save/restore code and make it C-callable This adds a parameter to __kvmppc_save_tm and __kvmppc_restore_tm which allows the caller to indicate whether it wants the nonvolatile register state to be preserved across the call, as required by the C calling conventions. This parameter being non-zero also causes the MSR bits that enable TM, FP, VMX and VSX to be preserved. The condition register and DSCR are now always preserved. With this, kvmppc_save_tm_hv and kvmppc_restore_tm_hv can be called from C code provided the 3rd parameter is non-zero. So that these functions can be called from modules, they now include code to set the TOC pointer (r2) on entry, as they can call other built-in C functions which will assume the TOC to have been set. 
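As a hypothetical illustration of the resulting calling convention (the
prototypes match the ones added to asm-prototypes.h by the diff below; the
caller itself is invented for the example and is not part of this patch):

  #include <asm/asm-prototypes.h>

  static void example_tm_round_trip(struct kvm_vcpu *vcpu, u64 guest_msr)
  {
          /*
           * preserve_nv == true asks the assembler helpers to save and
           * restore the non-volatile GPRs and to leave the MSR TM/FP/VEC/VSX
           * bits as they were on entry, which is what makes the calls safe
           * from compiled C code.
           */
          kvmppc_save_tm_hv(vcpu, guest_msr, true);

          /* ... host-side work while the guest TM state is saved ... */

          kvmppc_restore_tm_hv(vcpu, guest_msr, true);
  }
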
Also, the fake suspend code in kvmppc_save_tm_hv is modified here to assume that treclaim in fake-suspend state does not modify any registers, which is the case on POWER9. This enables the code to be simplified quite a bit. _kvmppc_save_tm_pr and _kvmppc_restore_tm_pr become much simpler with this change, since they now only need to save and restore TAR and pass 1 for the 3rd argument to __kvmppc_{save,restore}_tm. Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/asm-prototypes.h | 10 + arch/powerpc/kvm/book3s_hv_rmhandlers.S | 49 ++--- arch/powerpc/kvm/tm.S | 252 ++++++++++++---------- 3 files changed, 170 insertions(+), 141 deletions(-) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 024e8fc905d7..0c1a2b01512a 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -150,6 +150,16 @@ extern s32 patch__memset_nocache, patch__memcpy_nocache; extern long flush_count_cache; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv); +void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv); +#else +static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, + bool preserve_nv) { } +static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, + bool preserve_nv) { } +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ + void kvmhv_save_host_pmu(void); void kvmhv_load_host_pmu(void); void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index fc360b5ca9b1..45dd637d6b44 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -759,11 +759,13 @@ BEGIN_FTR_SECTION b 91f END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* - * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) */ mr r3, r4 ld r4, VCPU_MSR(r3) + li r5, 0 /* don't preserve non-vol regs */ bl kvmppc_restore_tm_hv + nop ld r4, HSTATE_KVM_VCPU(r13) 91: #endif @@ -1603,11 +1605,13 @@ BEGIN_FTR_SECTION b 91f END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* - * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) */ mr r3, r9 ld r4, VCPU_MSR(r3) + li r5, 0 /* don't preserve non-vol regs */ bl kvmppc_save_tm_hv + nop ld r9, HSTATE_KVM_VCPU(r13) 91: #endif @@ -2486,11 +2490,13 @@ BEGIN_FTR_SECTION b 91f END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* - * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) */ ld r3, HSTATE_KVM_VCPU(r13) ld r4, VCPU_MSR(r3) + li r5, 0 /* don't preserve non-vol regs */ bl kvmppc_save_tm_hv + nop 91: #endif @@ -2606,11 +2612,13 @@ BEGIN_FTR_SECTION b 91f END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* - * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) */ mr r3, r4 ld r4, VCPU_MSR(r3) + li r5, 0 /* don't preserve non-vol regs */ bl kvmppc_restore_tm_hv + nop ld r4, HSTATE_KVM_VCPU(r13) 91: #endif @@ -2943,10 +2951,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) * Save transactional state and TM-related registers. 
* Called with r3 pointing to the vcpu struct and r4 containing * the guest MSR value. - * This can modify all checkpointed registers, but + * r5 is non-zero iff non-volatile register state needs to be maintained. + * If r5 == 0, this can modify all checkpointed registers, but * restores r1 and r2 before exit. */ -kvmppc_save_tm_hv: +_GLOBAL_TOC(kvmppc_save_tm_hv) +EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv) /* See if we need to handle fake suspend mode */ BEGIN_FTR_SECTION b __kvmppc_save_tm @@ -2974,12 +2984,6 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) nop - std r1, HSTATE_HOST_R1(r13) - - /* Clear the MSR RI since r1, r13 may be foobar. */ - li r5, 0 - mtmsrd r5, 1 - /* We have to treclaim here because that's the only way to do S->N */ li r3, TM_CAUSE_KVM_RESCHED TRECLAIM(R3) @@ -2988,22 +2992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) * We were in fake suspend, so we are not going to save the * register state as the guest checkpointed state (since * we already have it), therefore we can now use any volatile GPR. + * In fact treclaim in fake suspend state doesn't modify + * any registers. */ - /* Reload PACA pointer, stack pointer and TOC. */ - GET_PACA(r13) - ld r1, HSTATE_HOST_R1(r13) - ld r2, PACATOC(r13) - /* Set MSR RI now we have r1 and r13 back. */ - li r5, MSR_RI - mtmsrd r5, 1 - - HMT_MEDIUM - ld r6, HSTATE_DSCR(r13) - mtspr SPRN_DSCR, r6 -BEGIN_FTR_SECTION_NESTED(96) +BEGIN_FTR_SECTION bl pnv_power9_force_smt4_release -END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) +END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) nop 4: @@ -3029,10 +3024,12 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) * Restore transactional state and TM-related registers. * Called with r3 pointing to the vcpu struct * and r4 containing the guest MSR value. + * r5 is non-zero iff non-volatile register state needs to be maintained. * This potentially modifies all checkpointed registers. * It restores r1 and r2 from the PACA. */ -kvmppc_restore_tm_hv: +_GLOBAL_TOC(kvmppc_restore_tm_hv) +EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv) /* * If we are doing TM emulation for the guest on a POWER9 DD2, * then we don't actually do a trechkpt -- we either set up diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S index 90e330f21356..0531a1492fdf 100644 --- a/arch/powerpc/kvm/tm.S +++ b/arch/powerpc/kvm/tm.S @@ -28,17 +28,25 @@ * Save transactional state and TM-related registers. * Called with: * - r3 pointing to the vcpu struct - * - r4 points to the MSR with current TS bits: + * - r4 containing the MSR with current TS bits: * (For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR). - * This can modify all checkpointed registers, but - * restores r1, r2 before exit. + * - r5 containing a flag indicating that non-volatile registers + * must be preserved. + * If r5 == 0, this can modify all checkpointed registers, but + * restores r1, r2 before exit. If r5 != 0, this restores the + * MSR TM/FP/VEC/VSX bits to their state on entry. */ _GLOBAL(__kvmppc_save_tm) mflr r0 std r0, PPC_LR_STKOFF(r1) + stdu r1, -SWITCH_FRAME_SIZE(r1) + + mr r9, r3 + cmpdi cr7, r5, 0 /* Turn on TM. */ mfmsr r8 + mr r10, r8 li r0, 1 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG ori r8, r8, MSR_FP @@ -51,6 +59,27 @@ _GLOBAL(__kvmppc_save_tm) std r1, HSTATE_SCRATCH2(r13) std r3, HSTATE_SCRATCH1(r13) + /* Save CR on the stack - even if r5 == 0 we need to get cr7 back. 
*/ + mfcr r6 + SAVE_GPR(6, r1) + + /* Save DSCR so we can restore it to avoid running with user value */ + mfspr r7, SPRN_DSCR + SAVE_GPR(7, r1) + + /* + * We are going to do treclaim., which will modify all checkpointed + * registers. Save the non-volatile registers on the stack if + * preservation of non-volatile state has been requested. + */ + beq cr7, 3f + SAVE_NVGPRS(r1) + + /* MSR[TS] will be 0 (non-transactional) once we do treclaim. */ + li r0, 0 + rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG + SAVE_GPR(10, r1) /* final MSR value */ +3: #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE BEGIN_FTR_SECTION /* Emulation of the treclaim instruction needs TEXASR before treclaim */ @@ -74,22 +103,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) std r9, PACATMSCRATCH(r13) ld r9, HSTATE_SCRATCH1(r13) - /* Get a few more GPRs free. */ - std r29, VCPU_GPRS_TM(29)(r9) - std r30, VCPU_GPRS_TM(30)(r9) - std r31, VCPU_GPRS_TM(31)(r9) - - /* Save away PPR and DSCR soon so don't run with user values. */ - mfspr r31, SPRN_PPR + /* Save away PPR soon so we don't run with user value. */ + std r0, VCPU_GPRS_TM(0)(r9) + mfspr r0, SPRN_PPR HMT_MEDIUM - mfspr r30, SPRN_DSCR -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - ld r29, HSTATE_DSCR(r13) - mtspr SPRN_DSCR, r29 -#endif - /* Save all but r9, r13 & r29-r31 */ - reg = 0 + /* Reload stack pointer. */ + std r1, VCPU_GPRS_TM(1)(r9) + ld r1, HSTATE_SCRATCH2(r13) + + /* Set MSR RI now we have r1 and r13 back. */ + std r2, VCPU_GPRS_TM(2)(r9) + li r2, MSR_RI + mtmsrd r2, 1 + + /* Reload TOC pointer. */ + ld r2, PACATOC(r13) + + /* Save all but r0-r2, r9 & r13 */ + reg = 3 .rept 29 .if (reg != 9) && (reg != 13) std reg, VCPU_GPRS_TM(reg)(r9) @@ -103,33 +135,29 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) ld r4, PACATMSCRATCH(r13) std r4, VCPU_GPRS_TM(9)(r9) - /* Reload stack pointer and TOC. */ - ld r1, HSTATE_SCRATCH2(r13) - ld r2, PACATOC(r13) - - /* Set MSR RI now we have r1 and r13 back. */ - li r5, MSR_RI - mtmsrd r5, 1 - - /* Save away checkpinted SPRs. */ - std r31, VCPU_PPR_TM(r9) - std r30, VCPU_DSCR_TM(r9) - mflr r5 + /* Restore host DSCR and CR values, after saving guest values */ mfcr r6 + mfspr r7, SPRN_DSCR + stw r6, VCPU_CR_TM(r9) + std r7, VCPU_DSCR_TM(r9) + REST_GPR(6, r1) + REST_GPR(7, r1) + mtcr r6 + mtspr SPRN_DSCR, r7 + + /* Save away checkpointed SPRs. */ + std r0, VCPU_PPR_TM(r9) + mflr r5 mfctr r7 mfspr r8, SPRN_AMR mfspr r10, SPRN_TAR mfxer r11 std r5, VCPU_LR_TM(r9) - stw r6, VCPU_CR_TM(r9) std r7, VCPU_CTR_TM(r9) std r8, VCPU_AMR_TM(r9) std r10, VCPU_TAR_TM(r9) std r11, VCPU_XER_TM(r9) - /* Restore r12 as trap number. */ - lwz r12, VCPU_TRAP(r9) - /* Save FP/VSX. */ addi r3, r9, VCPU_FPRS_TM bl store_fp_state @@ -137,6 +165,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) bl store_vr_state mfspr r6, SPRN_VRSAVE stw r6, VCPU_VRSAVE_TM(r9) + + /* Restore non-volatile registers if requested to */ + beq cr7, 1f + REST_NVGPRS(r1) + REST_GPR(10, r1) 1: /* * We need to save these SPRs after the treclaim so that the software @@ -146,12 +179,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) */ mfspr r7, SPRN_TEXASR std r7, VCPU_TEXASR(r9) -11: mfspr r5, SPRN_TFHAR mfspr r6, SPRN_TFIAR std r5, VCPU_TFHAR(r9) std r6, VCPU_TFIAR(r9) + /* Restore MSR state if requested */ + beq cr7, 2f + mtmsrd r10, 0 +2: + addi r1, r1, SWITCH_FRAME_SIZE ld r0, PPC_LR_STKOFF(r1) mtlr r0 blr @@ -161,49 +198,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) * be invoked from C function by PR KVM only. 
*/ _GLOBAL(_kvmppc_save_tm_pr) - mflr r5 - std r5, PPC_LR_STKOFF(r1) - stdu r1, -SWITCH_FRAME_SIZE(r1) - SAVE_NVGPRS(r1) - - /* save MSR since TM/math bits might be impacted - * by __kvmppc_save_tm(). - */ - mfmsr r5 - SAVE_GPR(5, r1) - - /* also save DSCR/CR/TAR so that it can be recovered later */ - mfspr r6, SPRN_DSCR - SAVE_GPR(6, r1) - - mfcr r7 - stw r7, _CCR(r1) + mflr r0 + std r0, PPC_LR_STKOFF(r1) + stdu r1, -PPC_MIN_STKFRM(r1) mfspr r8, SPRN_TAR - SAVE_GPR(8, r1) + std r8, PPC_MIN_STKFRM-8(r1) + li r5, 1 /* preserve non-volatile registers */ bl __kvmppc_save_tm - REST_GPR(8, r1) + ld r8, PPC_MIN_STKFRM-8(r1) mtspr SPRN_TAR, r8 - ld r7, _CCR(r1) - mtcr r7 - - REST_GPR(6, r1) - mtspr SPRN_DSCR, r6 - - /* need preserve current MSR's MSR_TS bits */ - REST_GPR(5, r1) - mfmsr r6 - rldicl r6, r6, 64 - MSR_TS_S_LG, 62 - rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG - mtmsrd r5 - - REST_NVGPRS(r1) - addi r1, r1, SWITCH_FRAME_SIZE - ld r5, PPC_LR_STKOFF(r1) - mtlr r5 + addi r1, r1, PPC_MIN_STKFRM + ld r0, PPC_LR_STKOFF(r1) + mtlr r0 blr EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr); @@ -215,15 +225,21 @@ EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr); * - r4 is the guest MSR with desired TS bits: * For HV KVM, it is VCPU_MSR * For PR KVM, it is provided by caller - * This potentially modifies all checkpointed registers. - * It restores r1, r2 from the PACA. + * - r5 containing a flag indicating that non-volatile registers + * must be preserved. + * If r5 == 0, this potentially modifies all checkpointed registers, but + * restores r1, r2 from the PACA before exit. + * If r5 != 0, this restores the MSR TM/FP/VEC/VSX bits to their state on entry. */ _GLOBAL(__kvmppc_restore_tm) mflr r0 std r0, PPC_LR_STKOFF(r1) + cmpdi cr7, r5, 0 + /* Turn on TM/FP/VSX/VMX so we can restore them. */ mfmsr r5 + mr r10, r5 li r6, MSR_TM >> 32 sldi r6, r6, 32 or r5, r5, r6 @@ -244,8 +260,7 @@ _GLOBAL(__kvmppc_restore_tm) mr r5, r4 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 - beqlr /* TM not active in guest */ - std r1, HSTATE_SCRATCH2(r13) + beq 9f /* TM not active in guest */ /* Make sure the failure summary is set, otherwise we'll program check * when we trechkpt. It's possible that this might have been not set @@ -255,6 +270,26 @@ _GLOBAL(__kvmppc_restore_tm) oris r7, r7, (TEXASR_FS)@h mtspr SPRN_TEXASR, r7 + /* + * Make a stack frame and save non-volatile registers if requested. + */ + stdu r1, -SWITCH_FRAME_SIZE(r1) + std r1, HSTATE_SCRATCH2(r13) + + mfcr r6 + mfspr r7, SPRN_DSCR + SAVE_GPR(2, r1) + SAVE_GPR(6, r1) + SAVE_GPR(7, r1) + + beq cr7, 4f + SAVE_NVGPRS(r1) + + /* MSR[TS] will be 1 (suspended) once we do trechkpt */ + li r0, 1 + rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG + SAVE_GPR(10, r1) /* final MSR value */ +4: /* * We need to load up the checkpointed state for the guest. * We need to do this early as it will blow away any GPRs, VSRs and @@ -291,8 +326,6 @@ _GLOBAL(__kvmppc_restore_tm) ld r29, VCPU_DSCR_TM(r3) ld r30, VCPU_PPR_TM(r3) - std r2, PACATMSCRATCH(r13) /* Save TOC */ - /* Clear the MSR RI since r1, r13 are all going to be foobar. */ li r5, 0 mtmsrd r5, 1 @@ -318,18 +351,31 @@ _GLOBAL(__kvmppc_restore_tm) /* Now let's get back the state we need. */ HMT_MEDIUM GET_PACA(r13) -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - ld r29, HSTATE_DSCR(r13) - mtspr SPRN_DSCR, r29 -#endif ld r1, HSTATE_SCRATCH2(r13) - ld r2, PACATMSCRATCH(r13) + REST_GPR(7, r1) + mtspr SPRN_DSCR, r7 /* Set the MSR RI since we have our registers back. 
*/ li r5, MSR_RI mtmsrd r5, 1 + + /* Restore TOC pointer and CR */ + REST_GPR(2, r1) + REST_GPR(6, r1) + mtcr r6 + + /* Restore non-volatile registers if requested to. */ + beq cr7, 5f + REST_GPR(10, r1) + REST_NVGPRS(r1) + +5: addi r1, r1, SWITCH_FRAME_SIZE ld r0, PPC_LR_STKOFF(r1) mtlr r0 + +9: /* Restore MSR bits if requested */ + beqlr cr7 + mtmsrd r10, 0 blr /* @@ -337,47 +383,23 @@ _GLOBAL(__kvmppc_restore_tm) * can be invoked from C function by PR KVM only. */ _GLOBAL(_kvmppc_restore_tm_pr) - mflr r5 - std r5, PPC_LR_STKOFF(r1) - stdu r1, -SWITCH_FRAME_SIZE(r1) - SAVE_NVGPRS(r1) - - /* save MSR to avoid TM/math bits change */ - mfmsr r5 - SAVE_GPR(5, r1) - - /* also save DSCR/CR/TAR so that it can be recovered later */ - mfspr r6, SPRN_DSCR - SAVE_GPR(6, r1) - - mfcr r7 - stw r7, _CCR(r1) + mflr r0 + std r0, PPC_LR_STKOFF(r1) + stdu r1, -PPC_MIN_STKFRM(r1) + /* save TAR so that it can be recovered later */ mfspr r8, SPRN_TAR - SAVE_GPR(8, r1) + std r8, PPC_MIN_STKFRM-8(r1) + li r5, 1 bl __kvmppc_restore_tm - REST_GPR(8, r1) + ld r8, PPC_MIN_STKFRM-8(r1) mtspr SPRN_TAR, r8 - ld r7, _CCR(r1) - mtcr r7 - - REST_GPR(6, r1) - mtspr SPRN_DSCR, r6 - - /* need preserve current MSR's MSR_TS bits */ - REST_GPR(5, r1) - mfmsr r6 - rldicl r6, r6, 64 - MSR_TS_S_LG, 62 - rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG - mtmsrd r5 - - REST_NVGPRS(r1) - addi r1, r1, SWITCH_FRAME_SIZE - ld r5, PPC_LR_STKOFF(r1) - mtlr r5 + addi r1, r1, PPC_MIN_STKFRM + ld r0, PPC_LR_STKOFF(r1) + mtlr r0 blr EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr); From 53655ddd7771f703071ac81cc5823eae509b6653 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:30:54 +1100 Subject: [PATCH 069/204] KVM: PPC: Book3S HV: Call kvmppc_handle_exit_hv() with vcore unlocked Currently kvmppc_handle_exit_hv() is called with the vcore lock held because it is called within a for_each_runnable_thread loop. However, we already unlock the vcore within kvmppc_handle_exit_hv() under certain circumstances, and this is safe because (a) any vcpus that become runnable and are added to the runnable set by kvmppc_run_vcpu() have their vcpu->arch.trap == 0 and can't actually run in the guest (because the vcore state is VCORE_EXITING), and (b) for_each_runnable_thread is safe against addition or removal of vcpus from the runnable set. Therefore, in order to simplify things for following patches, let's drop the vcore lock in the for_each_runnable_thread loop, so kvmppc_handle_exit_hv() gets called without the vcore lock held. 
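For reference, the locking pattern in post_guest_process() after this change is roughly the following (an illustrative, simplified sketch of the loop shown in the diff below, with the decrementer-cancel and accounting details elided):

	spin_lock(&vc->lock);
	for_each_runnable_thread(i, vcpu, vc) {
		/* Safe to drop the lock here; see the comment added below. */
		spin_unlock(&vc->lock);

		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
						    vcpu->arch.run_task);
		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		/* Re-take the lock for the runnable-set bookkeeping. */
		spin_lock(&vc->lock);
		/* requeue or remove the vcpu under the lock */
	}
	spin_unlock(&vc->lock);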
Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_hv.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 49a686c1185c..0e17593ee6e3 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1084,7 +1084,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) return RESUME_GUEST; } -/* Called with vcpu->arch.vcore->lock held */ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, struct task_struct *tsk) { @@ -1205,10 +1204,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, swab32(vcpu->arch.emul_inst) : vcpu->arch.emul_inst; if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { - /* Need vcore unlocked to call kvmppc_get_last_inst */ - spin_unlock(&vcpu->arch.vcore->lock); r = kvmppc_emulate_debug_inst(run, vcpu); - spin_lock(&vcpu->arch.vcore->lock); } else { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; @@ -1224,12 +1220,8 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: r = EMULATE_FAIL; if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) && - cpu_has_feature(CPU_FTR_ARCH_300)) { - /* Need vcore unlocked to call kvmppc_get_last_inst */ - spin_unlock(&vcpu->arch.vcore->lock); + cpu_has_feature(CPU_FTR_ARCH_300)) r = kvmppc_emulate_doorbell_instr(vcpu); - spin_lock(&vcpu->arch.vcore->lock); - } if (r == EMULATE_FAIL) { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; @@ -2599,6 +2591,14 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) spin_lock(&vc->lock); now = get_tb(); for_each_runnable_thread(i, vcpu, vc) { + /* + * It's safe to unlock the vcore in the loop here, because + * for_each_runnable_thread() is safe against removal of + * the vcpu, and the vcore state is VCORE_EXITING here, + * so any vcpus becoming runnable will have their arch.trap + * set to zero and can't actually run in the guest. + */ + spin_unlock(&vc->lock); /* cancel pending dec exception if dec is positive */ if (now < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu)) @@ -2614,6 +2614,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) vcpu->arch.ret = ret; vcpu->arch.trap = 0; + spin_lock(&vc->lock); if (is_kvmppc_resume_guest(vcpu->arch.ret)) { if (vcpu->arch.pending_exceptions) kvmppc_core_prepare_to_enter(vcpu); From 95a6432ce903858a2f285d611275340aa574c6ac Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:30:55 +1100 Subject: [PATCH 070/204] KVM: PPC: Book3S HV: Streamlined guest entry/exit path on P9 for radix guests This creates an alternative guest entry/exit path which is used for radix guests on POWER9 systems when we have indep_threads_mode=Y. In these circumstances there is exactly one vcpu per vcore and there is no coordination required between vcpus or vcores; the vcpu can enter the guest without needing to synchronize with anything else. The new fast path is implemented almost entirely in C in book3s_hv.c and runs with the MMU on until the guest is entered. On guest exit we use the existing path until the point where we are committed to exiting the guest (as distinct from handling an interrupt in the low-level code and returning to the guest) and we have pulled the guest context from the XIVE. 
At that point we check a flag in the stack frame to see whether we came in via the old path and the new path; if we came in via the new path then we go back to C code to do the rest of the process of saving the guest context and restoring the host context. The C code is split into separate functions for handling the OS-accessible state and the hypervisor state, with the idea that the latter can be replaced by a hypercall when we implement nested virtualization. Signed-off-by: Paul Mackerras Reviewed-by: David Gibson [mpe: Fix CONFIG_ALTIVEC=n build] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/asm-prototypes.h | 2 + arch/powerpc/include/asm/kvm_ppc.h | 2 + arch/powerpc/kvm/book3s_hv.c | 433 +++++++++++++++++++++- arch/powerpc/kvm/book3s_hv_ras.c | 2 + arch/powerpc/kvm/book3s_hv_rmhandlers.S | 95 ++++- arch/powerpc/kvm/book3s_xive.c | 63 ++++ 6 files changed, 593 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 0c1a2b01512a..5c9b00cec4f2 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -165,4 +165,6 @@ void kvmhv_load_host_pmu(void); void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use); void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu); +int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu); + #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 03a60f76f3d7..88362ccda549 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -583,6 +583,7 @@ extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval); extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status); +extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu); #else static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority) { return -1; } @@ -605,6 +606,7 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status) { return -ENODEV; } +static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { } #endif /* CONFIG_KVM_XIVE */ /* diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 0e17593ee6e3..4befa5a812fe 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3079,6 +3079,273 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) trace_kvmppc_run_core(vc, 1); } +/* + * Load up hypervisor-mode registers on P9. 
+ */ +static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit) +{ + struct kvmppc_vcore *vc = vcpu->arch.vcore; + s64 hdec; + u64 tb, purr, spurr; + int trap; + unsigned long host_hfscr = mfspr(SPRN_HFSCR); + unsigned long host_ciabr = mfspr(SPRN_CIABR); + unsigned long host_dawr = mfspr(SPRN_DAWR); + unsigned long host_dawrx = mfspr(SPRN_DAWRX); + unsigned long host_psscr = mfspr(SPRN_PSSCR); + unsigned long host_pidr = mfspr(SPRN_PID); + + hdec = time_limit - mftb(); + if (hdec < 0) + return BOOK3S_INTERRUPT_HV_DECREMENTER; + mtspr(SPRN_HDEC, hdec); + + if (vc->tb_offset) { + u64 new_tb = mftb() + vc->tb_offset; + mtspr(SPRN_TBU40, new_tb); + tb = mftb(); + if ((tb & 0xffffff) < (new_tb & 0xffffff)) + mtspr(SPRN_TBU40, new_tb + 0x1000000); + vc->tb_offset_applied = vc->tb_offset; + } + + if (vc->pcr) + mtspr(SPRN_PCR, vc->pcr); + mtspr(SPRN_DPDES, vc->dpdes); + mtspr(SPRN_VTB, vc->vtb); + + local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR); + local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR); + mtspr(SPRN_PURR, vcpu->arch.purr); + mtspr(SPRN_SPURR, vcpu->arch.spurr); + + if (cpu_has_feature(CPU_FTR_DAWR)) { + mtspr(SPRN_DAWR, vcpu->arch.dawr); + mtspr(SPRN_DAWRX, vcpu->arch.dawrx); + } + mtspr(SPRN_CIABR, vcpu->arch.ciabr); + mtspr(SPRN_IC, vcpu->arch.ic); + mtspr(SPRN_PID, vcpu->arch.pid); + + mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC | + (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG)); + + mtspr(SPRN_HFSCR, vcpu->arch.hfscr); + + mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0); + mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1); + mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2); + mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3); + + mtspr(SPRN_AMOR, ~0UL); + + mtspr(SPRN_LPCR, vc->lpcr); + isync(); + + kvmppc_xive_push_vcpu(vcpu); + + mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0); + mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1); + + trap = __kvmhv_vcpu_entry_p9(vcpu); + + /* Advance host PURR/SPURR by the amount used by guest */ + purr = mfspr(SPRN_PURR); + spurr = mfspr(SPRN_SPURR); + mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr + + purr - vcpu->arch.purr); + mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr + + spurr - vcpu->arch.spurr); + vcpu->arch.purr = purr; + vcpu->arch.spurr = spurr; + + vcpu->arch.ic = mfspr(SPRN_IC); + vcpu->arch.pid = mfspr(SPRN_PID); + vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS; + + vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0); + vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1); + vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2); + vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3); + + mtspr(SPRN_PSSCR, host_psscr); + mtspr(SPRN_HFSCR, host_hfscr); + mtspr(SPRN_CIABR, host_ciabr); + mtspr(SPRN_DAWR, host_dawr); + mtspr(SPRN_DAWRX, host_dawrx); + mtspr(SPRN_PID, host_pidr); + + /* + * Since this is radix, do a eieio; tlbsync; ptesync sequence in + * case we interrupted the guest between a tlbie and a ptesync. 
+ */ + asm volatile("eieio; tlbsync; ptesync"); + + mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */ + isync(); + + vc->dpdes = mfspr(SPRN_DPDES); + vc->vtb = mfspr(SPRN_VTB); + mtspr(SPRN_DPDES, 0); + if (vc->pcr) + mtspr(SPRN_PCR, 0); + + if (vc->tb_offset_applied) { + u64 new_tb = mftb() - vc->tb_offset_applied; + mtspr(SPRN_TBU40, new_tb); + tb = mftb(); + if ((tb & 0xffffff) < (new_tb & 0xffffff)) + mtspr(SPRN_TBU40, new_tb + 0x1000000); + vc->tb_offset_applied = 0; + } + + mtspr(SPRN_HDEC, 0x7fffffff); + mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr); + + return trap; +} + +/* + * Virtual-mode guest entry for POWER9 and later when the host and + * guest are both using the radix MMU. The LPIDR has already been set. + */ +int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit) +{ + struct kvmppc_vcore *vc = vcpu->arch.vcore; + unsigned long host_dscr = mfspr(SPRN_DSCR); + unsigned long host_tidr = mfspr(SPRN_TIDR); + unsigned long host_iamr = mfspr(SPRN_IAMR); + s64 dec; + u64 tb; + int trap, save_pmu; + + dec = mfspr(SPRN_DEC); + tb = mftb(); + if (dec < 512) + return BOOK3S_INTERRUPT_HV_DECREMENTER; + local_paca->kvm_hstate.dec_expires = dec + tb; + if (local_paca->kvm_hstate.dec_expires < time_limit) + time_limit = local_paca->kvm_hstate.dec_expires; + + vcpu->arch.ceded = 0; + + kvmhv_save_host_pmu(); /* saves it to PACA kvm_hstate */ + + kvmppc_subcore_enter_guest(); + + vc->entry_exit_map = 1; + vc->in_guest = 1; + + if (vcpu->arch.vpa.pinned_addr) { + struct lppaca *lp = vcpu->arch.vpa.pinned_addr; + u32 yield_count = be32_to_cpu(lp->yield_count) + 1; + lp->yield_count = cpu_to_be32(yield_count); + vcpu->arch.vpa.dirty = 1; + } + + if (cpu_has_feature(CPU_FTR_TM) || + cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) + kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true); + + kvmhv_load_guest_pmu(vcpu); + + msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX); + load_fp_state(&vcpu->arch.fp); +#ifdef CONFIG_ALTIVEC + load_vr_state(&vcpu->arch.vr); +#endif + + mtspr(SPRN_DSCR, vcpu->arch.dscr); + mtspr(SPRN_IAMR, vcpu->arch.iamr); + mtspr(SPRN_PSPB, vcpu->arch.pspb); + mtspr(SPRN_FSCR, vcpu->arch.fscr); + mtspr(SPRN_TAR, vcpu->arch.tar); + mtspr(SPRN_EBBHR, vcpu->arch.ebbhr); + mtspr(SPRN_EBBRR, vcpu->arch.ebbrr); + mtspr(SPRN_BESCR, vcpu->arch.bescr); + mtspr(SPRN_WORT, vcpu->arch.wort); + mtspr(SPRN_TIDR, vcpu->arch.tid); + mtspr(SPRN_DAR, vcpu->arch.shregs.dar); + mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); + mtspr(SPRN_AMR, vcpu->arch.amr); + mtspr(SPRN_UAMOR, vcpu->arch.uamor); + + if (!(vcpu->arch.ctrl & 1)) + mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1); + + mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb()); + + if (vcpu->arch.doorbell_request) { + vc->dpdes = 1; + smp_wmb(); + vcpu->arch.doorbell_request = 0; + } + + trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit); + + vcpu->arch.slb_max = 0; + dec = mfspr(SPRN_DEC); + tb = mftb(); + vcpu->arch.dec_expires = dec + tb; + vcpu->cpu = -1; + vcpu->arch.thread_cpu = -1; + vcpu->arch.ctrl = mfspr(SPRN_CTRLF); + + vcpu->arch.iamr = mfspr(SPRN_IAMR); + vcpu->arch.pspb = mfspr(SPRN_PSPB); + vcpu->arch.fscr = mfspr(SPRN_FSCR); + vcpu->arch.tar = mfspr(SPRN_TAR); + vcpu->arch.ebbhr = mfspr(SPRN_EBBHR); + vcpu->arch.ebbrr = mfspr(SPRN_EBBRR); + vcpu->arch.bescr = mfspr(SPRN_BESCR); + vcpu->arch.wort = mfspr(SPRN_WORT); + vcpu->arch.tid = mfspr(SPRN_TIDR); + vcpu->arch.amr = mfspr(SPRN_AMR); + vcpu->arch.uamor = mfspr(SPRN_UAMOR); + vcpu->arch.dscr = mfspr(SPRN_DSCR); + + mtspr(SPRN_PSPB, 0); + mtspr(SPRN_WORT, 0); + 
mtspr(SPRN_AMR, 0); + mtspr(SPRN_UAMOR, 0); + mtspr(SPRN_DSCR, host_dscr); + mtspr(SPRN_TIDR, host_tidr); + mtspr(SPRN_IAMR, host_iamr); + mtspr(SPRN_PSPB, 0); + + msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX); + store_fp_state(&vcpu->arch.fp); +#ifdef CONFIG_ALTIVEC + store_vr_state(&vcpu->arch.vr); +#endif + + if (cpu_has_feature(CPU_FTR_TM) || + cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) + kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true); + + save_pmu = 1; + if (vcpu->arch.vpa.pinned_addr) { + struct lppaca *lp = vcpu->arch.vpa.pinned_addr; + u32 yield_count = be32_to_cpu(lp->yield_count) + 1; + lp->yield_count = cpu_to_be32(yield_count); + vcpu->arch.vpa.dirty = 1; + save_pmu = lp->pmcregs_in_use; + } + + kvmhv_save_guest_pmu(vcpu, save_pmu); + + vc->entry_exit_map = 0x101; + vc->in_guest = 0; + + mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb()); + + kvmhv_load_host_pmu(); + + kvmppc_subcore_exit_guest(); + + return trap; +} + /* * Wait for some other vcpu thread to execute us, and * wake us up when we need to handle something in the host. @@ -3405,6 +3672,167 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) return vcpu->arch.ret; } +static int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, + struct kvm_vcpu *vcpu, u64 time_limit) +{ + int trap, r, pcpu, pcpu0; + int srcu_idx; + struct kvmppc_vcore *vc; + struct kvm *kvm = vcpu->kvm; + + trace_kvmppc_run_vcpu_enter(vcpu); + + kvm_run->exit_reason = 0; + vcpu->arch.ret = RESUME_GUEST; + vcpu->arch.trap = 0; + + vc = vcpu->arch.vcore; + vcpu->arch.ceded = 0; + vcpu->arch.run_task = current; + vcpu->arch.kvm_run = kvm_run; + vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); + vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; + vcpu->arch.busy_preempt = TB_NIL; + vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; + vc->runnable_threads[0] = vcpu; + vc->n_runnable = 1; + vc->runner = vcpu; + + /* See if the MMU is ready to go */ + if (!kvm->arch.mmu_ready) { + r = kvmhv_setup_mmu(vcpu); + if (r) { + kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; + kvm_run->fail_entry. 
+ hardware_entry_failure_reason = 0; + vcpu->arch.ret = r; + goto out; + } + } + + if (need_resched()) + cond_resched(); + + kvmppc_update_vpas(vcpu); + + init_vcore_to_run(vc); + vc->preempt_tb = TB_NIL; + + preempt_disable(); + pcpu = smp_processor_id(); + vc->pcpu = pcpu; + kvmppc_prepare_radix_vcpu(vcpu, pcpu); + + local_irq_disable(); + hard_irq_disable(); + if (signal_pending(current)) + goto sigpend; + if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready) + goto out; + + kvmppc_core_prepare_to_enter(vcpu); + + kvmppc_clear_host_core(pcpu); + + local_paca->kvm_hstate.tid = 0; + local_paca->kvm_hstate.napping = 0; + local_paca->kvm_hstate.kvm_split_mode = NULL; + kvmppc_start_thread(vcpu, vc); + kvmppc_create_dtl_entry(vcpu, vc); + trace_kvm_guest_enter(vcpu); + + vc->vcore_state = VCORE_RUNNING; + trace_kvmppc_run_core(vc, 0); + + mtspr(SPRN_LPID, vc->kvm->arch.lpid); + isync(); + + /* See comment above in kvmppc_run_core() about this */ + pcpu0 = pcpu; + if (cpu_has_feature(CPU_FTR_ARCH_300)) + pcpu0 &= ~0x3UL; + + if (cpumask_test_cpu(pcpu0, &kvm->arch.need_tlb_flush)) { + radix__local_flush_tlb_lpid_guest(kvm->arch.lpid); + /* Clear the bit after the TLB flush */ + cpumask_clear_cpu(pcpu0, &kvm->arch.need_tlb_flush); + } + + trace_hardirqs_on(); + guest_enter_irqoff(); + + srcu_idx = srcu_read_lock(&kvm->srcu); + + this_cpu_disable_ftrace(); + + trap = kvmhv_p9_guest_entry(vcpu, time_limit); + vcpu->arch.trap = trap; + + this_cpu_enable_ftrace(); + + srcu_read_unlock(&kvm->srcu, srcu_idx); + + mtspr(SPRN_LPID, kvm->arch.host_lpid); + isync(); + + trace_hardirqs_off(); + set_irq_happened(trap); + + kvmppc_set_host_core(pcpu); + + local_irq_enable(); + guest_exit(); + + cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest); + + preempt_enable(); + + /* cancel pending decrementer exception if DEC is now positive */ + if (get_tb() < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu)) + kvmppc_core_dequeue_dec(vcpu); + + trace_kvm_guest_exit(vcpu); + r = RESUME_GUEST; + if (trap) + r = kvmppc_handle_exit_hv(kvm_run, vcpu, current); + vcpu->arch.ret = r; + + if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded && + !kvmppc_vcpu_woken(vcpu)) { + kvmppc_set_timer(vcpu); + while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) { + if (signal_pending(current)) { + vcpu->stat.signal_exits++; + kvm_run->exit_reason = KVM_EXIT_INTR; + vcpu->arch.ret = -EINTR; + break; + } + spin_lock(&vc->lock); + kvmppc_vcore_blocked(vc); + spin_unlock(&vc->lock); + } + } + vcpu->arch.ceded = 0; + + vc->vcore_state = VCORE_INACTIVE; + trace_kvmppc_run_core(vc, 1); + + done: + kvmppc_remove_runnable(vc, vcpu); + trace_kvmppc_run_vcpu_exit(vcpu, kvm_run); + + return vcpu->arch.ret; + + sigpend: + vcpu->stat.signal_exits++; + kvm_run->exit_reason = KVM_EXIT_INTR; + vcpu->arch.ret = -EINTR; + out: + local_irq_enable(); + preempt_enable(); + goto done; +} + static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) { int r; @@ -3480,7 +3908,10 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; do { - r = kvmppc_run_vcpu(run, vcpu); + if (kvm->arch.threads_indep && kvm_is_radix(kvm)) + r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0); + else + r = kvmppc_run_vcpu(run, vcpu); if (run->exit_reason == KVM_EXIT_PAPR_HCALL && !(vcpu->arch.shregs.msr & MSR_PR)) { diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c index ee564b682f0c..0787f12c1a1b 100644 --- a/arch/powerpc/kvm/book3s_hv_ras.c +++ 
b/arch/powerpc/kvm/book3s_hv_ras.c @@ -177,6 +177,7 @@ void kvmppc_subcore_enter_guest(void) local_paca->sibling_subcore_state->in_guest[subcore_id] = 1; } +EXPORT_SYMBOL_GPL(kvmppc_subcore_enter_guest); void kvmppc_subcore_exit_guest(void) { @@ -187,6 +188,7 @@ void kvmppc_subcore_exit_guest(void) local_paca->sibling_subcore_state->in_guest[subcore_id] = 0; } +EXPORT_SYMBOL_GPL(kvmppc_subcore_exit_guest); static bool kvmppc_tb_resync_required(void) { diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 45dd637d6b44..ea84696b0a3c 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -47,8 +47,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) #define NAPPING_NOVCPU 2 /* Stack frame offsets for kvmppc_hv_entry */ -#define SFS 160 +#define SFS 208 #define STACK_SLOT_TRAP (SFS-4) +#define STACK_SLOT_SHORT_PATH (SFS-8) #define STACK_SLOT_TID (SFS-16) #define STACK_SLOT_PSSCR (SFS-24) #define STACK_SLOT_PID (SFS-32) @@ -57,6 +58,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) #define STACK_SLOT_DAWR (SFS-56) #define STACK_SLOT_DAWRX (SFS-64) #define STACK_SLOT_HFSCR (SFS-72) +/* the following is used by the P9 short path */ +#define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */ /* * Call kvmppc_hv_entry in real mode. @@ -1020,6 +1023,9 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) no_xive: #endif /* CONFIG_KVM_XICS */ + li r0, 0 + stw r0, STACK_SLOT_SHORT_PATH(r1) + deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */ /* Check if we can deliver an external or decrementer interrupt now */ ld r0, VCPU_PENDING_EXC(r4) @@ -1034,13 +1040,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) bl kvmppc_guest_entry_inject_int ld r4, HSTATE_KVM_VCPU(r13) 71: - ld r10, VCPU_PC(r4) - ld r11, VCPU_MSR(r4) ld r6, VCPU_SRR0(r4) ld r7, VCPU_SRR1(r4) mtspr SPRN_SRR0, r6 mtspr SPRN_SRR1, r7 +fast_guest_entry_c: + ld r10, VCPU_PC(r4) + ld r11, VCPU_MSR(r4) /* r11 = vcpu->arch.msr & ~MSR_HV */ rldicl r11, r11, 63 - MSR_HV_LG, 1 rotldi r11, r11, 1 + MSR_HV_LG @@ -1117,6 +1124,83 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) HRFI_TO_GUEST b . +/* + * Enter the guest on a P9 or later system where we have exactly + * one vcpu per vcore and we don't need to go to real mode + * (which implies that host and guest are both using radix MMU mode). + * r3 = vcpu pointer + * Most SPRs and all the VSRs have been loaded already. + */ +_GLOBAL(__kvmhv_vcpu_entry_p9) +EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9) + mflr r0 + std r0, PPC_LR_STKOFF(r1) + stdu r1, -SFS(r1) + + li r0, 1 + stw r0, STACK_SLOT_SHORT_PATH(r1) + + std r3, HSTATE_KVM_VCPU(r13) + mfcr r4 + stw r4, SFS+8(r1) + + std r1, HSTATE_HOST_R1(r13) + + reg = 14 + .rept 18 + std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1) + reg = reg + 1 + .endr + + reg = 14 + .rept 18 + ld reg, __VCPU_GPR(reg)(r3) + reg = reg + 1 + .endr + + mfmsr r10 + std r10, HSTATE_HOST_MSR(r13) + + mr r4, r3 + b fast_guest_entry_c +guest_exit_short_path: + + li r0, KVM_GUEST_MODE_NONE + stb r0, HSTATE_IN_GUEST(r13) + + reg = 14 + .rept 18 + std reg, __VCPU_GPR(reg)(r9) + reg = reg + 1 + .endr + + reg = 14 + .rept 18 + ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1) + reg = reg + 1 + .endr + + lwz r4, SFS+8(r1) + mtcr r4 + + mr r3, r12 /* trap number */ + + addi r1, r1, SFS + ld r0, PPC_LR_STKOFF(r1) + mtlr r0 + + /* If we are in real mode, do a rfid to get back to the caller */ + mfmsr r4 + andi. 
r5, r4, MSR_IR + bnelr + rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */ + mtspr SPRN_SRR0, r0 + ld r10, HSTATE_HOST_MSR(r13) + rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG + mtspr SPRN_SRR1, r10 + RFI_TO_KERNEL + b . + secondary_too_late: li r12, 0 stw r12, STACK_SLOT_TRAP(r1) @@ -1377,6 +1461,11 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ 1: #endif /* CONFIG_KVM_XICS */ + /* If we came in through the P9 short path, go back out to C now */ + lwz r0, STACK_SLOT_SHORT_PATH(r1) + cmpwi r0, 0 + bne guest_exit_short_path + /* For hash guest, read the guest SLB and save it away */ ld r5, VCPU_KVM(r9) lbz r0, KVM_RADIX(r5) diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 30c2eb766954..ad4a370703d3 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -61,6 +61,69 @@ */ #define XIVE_Q_GAP 2 +/* + * Push a vcpu's context to the XIVE on guest entry. + * This assumes we are in virtual mode (MMU on) + */ +void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) +{ + void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt; + u64 pq; + + if (!tima) + return; + eieio(); + __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS); + __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2); + vcpu->arch.xive_pushed = 1; + eieio(); + + /* + * We clear the irq_pending flag. There is a small chance of a + * race vs. the escalation interrupt happening on another + * processor setting it again, but the only consequence is to + * cause a spurious wakeup on the next H_CEDE, which is not an + * issue. + */ + vcpu->arch.irq_pending = 0; + + /* + * In single escalation mode, if the escalation interrupt is + * on, we mask it. + */ + if (vcpu->arch.xive_esc_on) { + pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + + XIVE_ESB_SET_PQ_01)); + mb(); + + /* + * We have a possible subtle race here: The escalation + * interrupt might have fired and be on its way to the + * host queue while we mask it, and if we unmask it + * early enough (re-cede right away), there is a + * theorical possibility that it fires again, thus + * landing in the target queue more than once which is + * a big no-no. + * + * Fortunately, solving this is rather easy. If the + * above load setting PQ to 01 returns a previous + * value where P is set, then we know the escalation + * interrupt is somewhere on its way to the host. In + * that case we simply don't clear the xive_esc_on + * flag below. It will be eventually cleared by the + * handler for the escalation interrupt. + * + * Then, when doing a cede, we check that flag again + * before re-enabling the escalation interrupt, and if + * set, we abort the cede. + */ + if (!(pq & XIVE_ESB_VAL_P)) + /* Now P is 0, we can clear the flag */ + vcpu->arch.xive_esc_on = 0; + } +} +EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu); + /* * This is a simple trigger for a generic XIVE IRQ. This must * only be called for interrupts that support a trigger page From 32eb150aee8dce5076638ebafe53732276c9f28e Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:30:56 +1100 Subject: [PATCH 071/204] KVM: PPC: Book3S HV: Handle hypervisor instruction faults better Currently the code for handling hypervisor instruction page faults passes 0 for the flags indicating the type of fault, which is OK in the usual case that the page is not mapped in the partition-scoped page tables. 
However, there are other causes for hypervisor instruction page faults, such as not being able to update a reference (R) or change (C) bit. The cause is indicated in bits in HSRR1, including a bit which indicates that the fault is due to not being able to write to a page (for example to update an R or C bit). Not handling these other kinds of faults correctly can lead to a loop of continual faults without forward progress in the guest. In order to handle these faults better, this patch constructs a "DSISR-like" value from the bits which DSISR and SRR1 (for a HISI) have in common, and passes it to kvmppc_book3s_hv_page_fault() so that it knows what caused the fault. Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/reg.h | 1 + arch/powerpc/kvm/book3s_hv.c | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index e5b314ed054e..6fda746ba328 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -766,6 +766,7 @@ #define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ #define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ #define HSRR1_DENORM 0x00100000 /* Denorm exception */ +#define HSRR1_HISI_WRITE 0x00010000 /* HISI bcs couldn't update mem */ #define SPRN_TBCTL 0x35f /* PA6T Timebase control register */ #define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 4befa5a812fe..3334ed9e17e8 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1188,7 +1188,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, break; case BOOK3S_INTERRUPT_H_INST_STORAGE: vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); - vcpu->arch.fault_dsisr = 0; + vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr & + DSISR_SRR1_MATCH_64S; + if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) + vcpu->arch.fault_dsisr |= DSISR_ISSTORE; r = RESUME_PAGE_FAULT; break; /* From 9a94d3ee2d159927c0f8e5078228eadbce8dda43 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:30:57 +1100 Subject: [PATCH 072/204] KVM: PPC: Book3S HV: Add a debugfs file to dump radix mappings This adds a file called 'radix' in the debugfs directory for the guest, which when read gives all of the valid leaf PTEs in the partition-scoped radix tree for a radix guest, in human-readable format. It is analogous to the existing 'htab' file which dumps the HPT entries for a HPT guest.
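As a rough illustration (the addresses and PTE values below are invented, not taken from a real guest), the scnprintf() formats used in debugfs_radix_read() below produce output along these lines:

	pgdir: c000003fe4a80000
	 0: c000000087d2018d 21
	 200000: c000000089f4018d 21
	 fff0000: c00000009a61018d 16

i.e. one header line with the base of the partition-scoped page table, then one line per valid leaf PTE giving the guest physical address, the raw PTE value and the page-size shift.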
Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_book3s_64.h | 1 + arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/kvm/book3s_64_mmu_radix.c | 179 +++++++++++++++++++++++ arch/powerpc/kvm/book3s_hv.c | 2 + 4 files changed, 183 insertions(+) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index dc435a5af7d6..af25aaa24090 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -435,6 +435,7 @@ static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm) } extern void kvmppc_mmu_debugfs_init(struct kvm *kvm); +extern void kvmhv_radix_debugfs_init(struct kvm *kvm); extern void kvmhv_rm_send_ipi(int cpu); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 3cd0b9f45c2a..a3d4f61a409d 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -291,6 +291,7 @@ struct kvm_arch { u64 process_table; struct dentry *debugfs_dir; struct dentry *htab_dentry; + struct dentry *radix_dentry; struct kvm_resize_hpt *resize_hpt; /* protected by kvm->lock */ #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 933c574e1cf7..71951b5a2c68 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -10,6 +10,9 @@ #include #include #include +#include +#include +#include #include #include @@ -853,6 +856,182 @@ static void pmd_ctor(void *addr) memset(addr, 0, RADIX_PMD_TABLE_SIZE); } +struct debugfs_radix_state { + struct kvm *kvm; + struct mutex mutex; + unsigned long gpa; + int chars_left; + int buf_index; + char buf[128]; + u8 hdr; +}; + +static int debugfs_radix_open(struct inode *inode, struct file *file) +{ + struct kvm *kvm = inode->i_private; + struct debugfs_radix_state *p; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + + kvm_get_kvm(kvm); + p->kvm = kvm; + mutex_init(&p->mutex); + file->private_data = p; + + return nonseekable_open(inode, file); +} + +static int debugfs_radix_release(struct inode *inode, struct file *file) +{ + struct debugfs_radix_state *p = file->private_data; + + kvm_put_kvm(p->kvm); + kfree(p); + return 0; +} + +static ssize_t debugfs_radix_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + struct debugfs_radix_state *p = file->private_data; + ssize_t ret, r; + unsigned long n; + struct kvm *kvm; + unsigned long gpa; + pgd_t *pgt; + pgd_t pgd, *pgdp; + pud_t pud, *pudp; + pmd_t pmd, *pmdp; + pte_t *ptep; + int shift; + unsigned long pte; + + kvm = p->kvm; + if (!kvm_is_radix(kvm)) + return 0; + + ret = mutex_lock_interruptible(&p->mutex); + if (ret) + return ret; + + if (p->chars_left) { + n = p->chars_left; + if (n > len) + n = len; + r = copy_to_user(buf, p->buf + p->buf_index, n); + n -= r; + p->chars_left -= n; + p->buf_index += n; + buf += n; + len -= n; + ret = n; + if (r) { + if (!n) + ret = -EFAULT; + goto out; + } + } + + gpa = p->gpa; + pgt = kvm->arch.pgtable; + while (len != 0 && gpa < RADIX_PGTABLE_RANGE) { + if (!p->hdr) { + n = scnprintf(p->buf, sizeof(p->buf), + "pgdir: %lx\n", (unsigned long)pgt); + p->hdr = 1; + goto copy; + } + + pgdp = pgt + pgd_index(gpa); + pgd = READ_ONCE(*pgdp); + if (!(pgd_val(pgd) & _PAGE_PRESENT)) { + gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE; + continue; + } + + pudp = 
pud_offset(&pgd, gpa); + pud = READ_ONCE(*pudp); + if (!(pud_val(pud) & _PAGE_PRESENT)) { + gpa = (gpa & PUD_MASK) + PUD_SIZE; + continue; + } + if (pud_val(pud) & _PAGE_PTE) { + pte = pud_val(pud); + shift = PUD_SHIFT; + goto leaf; + } + + pmdp = pmd_offset(&pud, gpa); + pmd = READ_ONCE(*pmdp); + if (!(pmd_val(pmd) & _PAGE_PRESENT)) { + gpa = (gpa & PMD_MASK) + PMD_SIZE; + continue; + } + if (pmd_val(pmd) & _PAGE_PTE) { + pte = pmd_val(pmd); + shift = PMD_SHIFT; + goto leaf; + } + + ptep = pte_offset_kernel(&pmd, gpa); + pte = pte_val(READ_ONCE(*ptep)); + if (!(pte & _PAGE_PRESENT)) { + gpa += PAGE_SIZE; + continue; + } + shift = PAGE_SHIFT; + leaf: + n = scnprintf(p->buf, sizeof(p->buf), + " %lx: %lx %d\n", gpa, pte, shift); + gpa += 1ul << shift; + copy: + p->chars_left = n; + if (n > len) + n = len; + r = copy_to_user(buf, p->buf, n); + n -= r; + p->chars_left -= n; + p->buf_index = n; + buf += n; + len -= n; + ret += n; + if (r) { + if (!ret) + ret = -EFAULT; + break; + } + } + p->gpa = gpa; + + out: + mutex_unlock(&p->mutex); + return ret; +} + +static ssize_t debugfs_radix_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + return -EACCES; +} + +static const struct file_operations debugfs_radix_fops = { + .owner = THIS_MODULE, + .open = debugfs_radix_open, + .release = debugfs_radix_release, + .read = debugfs_radix_read, + .write = debugfs_radix_write, + .llseek = generic_file_llseek, +}; + +void kvmhv_radix_debugfs_init(struct kvm *kvm) +{ + kvm->arch.radix_dentry = debugfs_create_file("radix", 0400, + kvm->arch.debugfs_dir, kvm, + &debugfs_radix_fops); +} + int kvmppc_radix_init(void) { unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 3334ed9e17e8..deaca0135aba 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4485,6 +4485,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) snprintf(buf, sizeof(buf), "vm%d", current->pid); kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir); kvmppc_mmu_debugfs_init(kvm); + if (radix_enabled()) + kvmhv_radix_debugfs_init(kvm); return 0; } From fd0944baad806dfb4c777124ec712c55b714ff51 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:30:58 +1100 Subject: [PATCH 073/204] KVM: PPC: Use ccr field in pt_regs struct embedded in vcpu struct When the 'regs' field was added to struct kvm_vcpu_arch, the code was changed to use several of the fields inside regs (e.g., gpr, lr, etc.) but not the ccr field, because the ccr field in struct pt_regs is 64 bits on 64-bit platforms, but the cr field in kvm_vcpu_arch is only 32 bits. This changes the code to use the regs.ccr field instead of cr, and changes the assembly code on 64-bit platforms to use 64-bit loads and stores instead of 32-bit ones. 
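To make the width issue concrete, here is a minimal sketch of the layout this change relies on; the struct and function names are stand-ins for illustration only, not the real kernel definitions:

	/* Simplified stand-ins, assuming a 64-bit platform. */
	struct pt_regs_like {
		unsigned long ccr;		/* 64 bits wide here */
	};

	struct vcpu_arch_like {
		struct pt_regs_like regs;	/* embedded pt_regs-style block */
	};

	static inline void set_cr_sketch(struct vcpu_arch_like *a, unsigned int val)
	{
		a->regs.ccr = val;		/* zero-extends into the wide field */
	}

	static inline unsigned int get_cr_sketch(const struct vcpu_arch_like *a)
	{
		return (unsigned int)a->regs.ccr;	/* CR is architecturally 32 bits */
	}

Because the field is now the full register width, the assembly accesses have to match it, hence the switch from stw/lwz to std/ld (and to PPC_STL/PPC_LL in the booke code) in the hunks below.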
Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_book3s.h | 4 ++-- arch/powerpc/include/asm/kvm_book3s_64.h | 4 ++-- arch/powerpc/include/asm/kvm_booke.h | 4 ++-- arch/powerpc/include/asm/kvm_host.h | 2 -- arch/powerpc/kernel/asm-offsets.c | 4 ++-- arch/powerpc/kvm/book3s_emulate.c | 12 ++++++------ arch/powerpc/kvm/book3s_hv.c | 4 ++-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 4 ++-- arch/powerpc/kvm/book3s_hv_tm.c | 6 +++--- arch/powerpc/kvm/book3s_hv_tm_builtin.c | 5 +++-- arch/powerpc/kvm/book3s_pr.c | 4 ++-- arch/powerpc/kvm/bookehv_interrupts.S | 8 ++++---- arch/powerpc/kvm/emulate_loadstore.c | 1 - 13 files changed, 30 insertions(+), 32 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 83a9aa3cf689..dd18d8174504 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -301,12 +301,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) { - vcpu->arch.cr = val; + vcpu->arch.regs.ccr = val; } static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) { - return vcpu->arch.cr; + return vcpu->arch.regs.ccr; } static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index af25aaa24090..5c0e2d9a7e15 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -483,7 +483,7 @@ static inline u64 sanitize_msr(u64 msr) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu) { - vcpu->arch.cr = vcpu->arch.cr_tm; + vcpu->arch.regs.ccr = vcpu->arch.cr_tm; vcpu->arch.regs.xer = vcpu->arch.xer_tm; vcpu->arch.regs.link = vcpu->arch.lr_tm; vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; @@ -500,7 +500,7 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu) static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu) { - vcpu->arch.cr_tm = vcpu->arch.cr; + vcpu->arch.cr_tm = vcpu->arch.regs.ccr; vcpu->arch.xer_tm = vcpu->arch.regs.xer; vcpu->arch.lr_tm = vcpu->arch.regs.link; vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h index d513e3ed1c65..f0cef625f17c 100644 --- a/arch/powerpc/include/asm/kvm_booke.h +++ b/arch/powerpc/include/asm/kvm_booke.h @@ -46,12 +46,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) { - vcpu->arch.cr = val; + vcpu->arch.regs.ccr = val; } static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) { - return vcpu->arch.cr; + return vcpu->arch.regs.ccr; } static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index a3d4f61a409d..c9cc42f73b3c 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -538,8 +538,6 @@ struct kvm_vcpu_arch { ulong tar; #endif - u32 cr; - #ifdef CONFIG_PPC_BOOK3S ulong hflags; ulong guest_owned_ext; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 89cf15566c4e..7c3738d890e8 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -438,7 +438,7 @@ int main(void) #ifdef CONFIG_PPC_BOOK3S OFFSET(VCPU_TAR, kvm_vcpu, arch.tar); #endif - 
OFFSET(VCPU_CR, kvm_vcpu, arch.cr); + OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr); OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr); @@ -695,7 +695,7 @@ int main(void) #endif /* CONFIG_PPC_BOOK3S_64 */ #else /* CONFIG_PPC_BOOK3S */ - OFFSET(VCPU_CR, kvm_vcpu, arch.cr); + OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr); OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer); OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link); OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr); diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 36b11c5a0dbb..2654df220d05 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -110,7 +110,7 @@ static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu) vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; vcpu->arch.tar_tm = vcpu->arch.tar; vcpu->arch.lr_tm = vcpu->arch.regs.link; - vcpu->arch.cr_tm = vcpu->arch.cr; + vcpu->arch.cr_tm = vcpu->arch.regs.ccr; vcpu->arch.xer_tm = vcpu->arch.regs.xer; vcpu->arch.vrsave_tm = vcpu->arch.vrsave; } @@ -129,7 +129,7 @@ static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu) vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; vcpu->arch.tar = vcpu->arch.tar_tm; vcpu->arch.regs.link = vcpu->arch.lr_tm; - vcpu->arch.cr = vcpu->arch.cr_tm; + vcpu->arch.regs.ccr = vcpu->arch.cr_tm; vcpu->arch.regs.xer = vcpu->arch.xer_tm; vcpu->arch.vrsave = vcpu->arch.vrsave_tm; } @@ -141,7 +141,7 @@ static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val) uint64_t texasr; /* CR0 = 0 | MSR[TS] | 0 */ - vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) | (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) << CR0_SHIFT); @@ -220,7 +220,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) tm_abort(ra_val); /* CR0 = 0 | MSR[TS] | 0 */ - vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) | (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) << CR0_SHIFT); @@ -494,8 +494,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, if (!(kvmppc_get_msr(vcpu) & MSR_PR)) { preempt_disable(); - vcpu->arch.cr = (CR0_TBEGIN_FAILURE | - (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT))); + vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE | + (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT))); vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT | (((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index deaca0135aba..3d99f4f410f9 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -410,8 +410,8 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); - pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n", - vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); + pr_err("cr = %.8lx xer = %.16lx dsisr = %.8x\n", + vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); pr_err("fault dar = %.16lx dsisr = %.8x\n", vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index ea84696b0a3c..9cea23866378 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ 
b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1092,7 +1092,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r5, VCPU_LR(r4) - lwz r6, VCPU_CR(r4) + ld r6, VCPU_CR(r4) mtlr r5 mtcr r6 @@ -1280,7 +1280,7 @@ kvmppc_interrupt_hv: std r3, VCPU_GPR(R12)(r9) /* CR is in the high half of r12 */ srdi r4, r12, 32 - stw r4, VCPU_CR(r9) + std r4, VCPU_CR(r9) BEGIN_FTR_SECTION ld r3, HSTATE_CFAR(r13) std r3, VCPU_CFAR(r9) diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c index 008285058f9b..888e2609e3f1 100644 --- a/arch/powerpc/kvm/book3s_hv_tm.c +++ b/arch/powerpc/kvm/book3s_hv_tm.c @@ -130,7 +130,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) return RESUME_GUEST; } /* Set CR0 to indicate previous transactional state */ - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); /* L=1 => tresume, L=0 => tsuspend */ if (instr & (1 << 21)) { @@ -174,7 +174,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) copy_from_checkpoint(vcpu); /* Set CR0 to indicate previous transactional state */ - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); vcpu->arch.shregs.msr &= ~MSR_TS_MASK; return RESUME_GUEST; @@ -204,7 +204,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) copy_to_checkpoint(vcpu); /* Set CR0 to indicate previous transactional state */ - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); vcpu->arch.shregs.msr = msr | MSR_TS_S; return RESUME_GUEST; diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c index b2c7c6fca4f9..3cf5863bc06e 100644 --- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c @@ -89,7 +89,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) if (instr & (1 << 21)) vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; /* Set CR0 to 0b0010 */ - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000; + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | + 0x20000000; return 1; } @@ -105,5 +106,5 @@ void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */ vcpu->arch.regs.nip = vcpu->arch.tfhar; copy_from_checkpoint(vcpu); - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000; + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000; } diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 059683e4e67a..4efd65d9e828 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -167,7 +167,7 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) svcpu->gpr[11] = vcpu->arch.regs.gpr[11]; svcpu->gpr[12] = vcpu->arch.regs.gpr[12]; svcpu->gpr[13] = vcpu->arch.regs.gpr[13]; - svcpu->cr = vcpu->arch.cr; + svcpu->cr = vcpu->arch.regs.ccr; svcpu->xer = vcpu->arch.regs.xer; svcpu->ctr = vcpu->arch.regs.ctr; svcpu->lr = vcpu->arch.regs.link; @@ -249,7 +249,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) vcpu->arch.regs.gpr[11] = svcpu->gpr[11]; vcpu->arch.regs.gpr[12] = svcpu->gpr[12]; vcpu->arch.regs.gpr[13] = svcpu->gpr[13]; - vcpu->arch.cr = svcpu->cr; + vcpu->arch.regs.ccr = svcpu->cr; vcpu->arch.regs.xer = svcpu->xer; vcpu->arch.regs.ctr = svcpu->ctr; vcpu->arch.regs.link = svcpu->lr; diff --git 
a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index 81bd8a07aa51..051af7d97327 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -182,7 +182,7 @@ */ PPC_LL r4, PACACURRENT(r13) PPC_LL r4, (THREAD + THREAD_KVM_VCPU)(r4) - stw r10, VCPU_CR(r4) + PPC_STL r10, VCPU_CR(r4) PPC_STL r11, VCPU_GPR(R4)(r4) PPC_STL r5, VCPU_GPR(R5)(r4) PPC_STL r6, VCPU_GPR(R6)(r4) @@ -292,7 +292,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1) PPC_STL r4, VCPU_GPR(R4)(r11) PPC_LL r4, THREAD_NORMSAVE(0)(r10) PPC_STL r5, VCPU_GPR(R5)(r11) - stw r13, VCPU_CR(r11) + PPC_STL r13, VCPU_CR(r11) mfspr r5, \srr0 PPC_STL r3, VCPU_GPR(R10)(r11) PPC_LL r3, THREAD_NORMSAVE(2)(r10) @@ -319,7 +319,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1) PPC_STL r4, VCPU_GPR(R4)(r11) PPC_LL r4, GPR9(r8) PPC_STL r5, VCPU_GPR(R5)(r11) - stw r9, VCPU_CR(r11) + PPC_STL r9, VCPU_CR(r11) mfspr r5, \srr0 PPC_STL r3, VCPU_GPR(R8)(r11) PPC_LL r3, GPR10(r8) @@ -643,7 +643,7 @@ lightweight_exit: PPC_LL r3, VCPU_LR(r4) PPC_LL r5, VCPU_XER(r4) PPC_LL r6, VCPU_CTR(r4) - lwz r7, VCPU_CR(r4) + PPC_LL r7, VCPU_CR(r4) PPC_LL r8, VCPU_PC(r4) PPC_LD(r9, VCPU_SHARED_MSR, r11) PPC_LL r0, VCPU_GPR(R0)(r4) diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c index 75dce1ef3bc8..f91b1309a0a8 100644 --- a/arch/powerpc/kvm/emulate_loadstore.c +++ b/arch/powerpc/kvm/emulate_loadstore.c @@ -117,7 +117,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) emulated = EMULATE_FAIL; vcpu->arch.regs.msr = vcpu->arch.shared->msr; - vcpu->arch.regs.ccr = vcpu->arch.cr; if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) { int type = op.type & INSTR_TYPE_MASK; int size = GETSIZE(op.type); From 89329c0be8bdab7b8dfb0c6da777951d4cef06c0 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Mon, 8 Oct 2018 16:30:59 +1100 Subject: [PATCH 074/204] KVM: PPC: Book3S HV: Clear partition table entry on vm teardown When destroying a VM we return the LPID to the pool, however we never zero the partition table entry. This is instead done when we reallocate the LPID. Zero the partition table entry on VM teardown before returning the LPID to the pool. This means if we were running as a nested hypervisor the real hypervisor could use this to determine when it can free resources. Reviewed-by: David Gibson Signed-off-by: Suraj Jitindar Singh Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_hv.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 3d99f4f410f9..8c20a90a6851 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4509,13 +4509,19 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) kvmppc_free_vcores(kvm); - kvmppc_free_lpid(kvm->arch.lpid); if (kvm_is_radix(kvm)) kvmppc_free_radix(kvm); else kvmppc_free_hpt(&kvm->arch.hpt); + /* Perform global invalidation and return lpid to the pool */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + kvm->arch.process_table = 0; + kvmppc_setup_partition_table(kvm); + } + kvmppc_free_lpid(kvm->arch.lpid); + kvmppc_free_pimap(kvm); } From 9811c78e968f26ca040c53f6180ff2018939ae24 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Mon, 8 Oct 2018 16:31:00 +1100 Subject: [PATCH 075/204] KVM: PPC: Book3S HV: Make kvmppc_mmu_radix_xlate process/partition table agnostic kvmppc_mmu_radix_xlate() is used to translate an effective address through the process tables. 
The process table and partition tables have identical layout. Exploit this fact to make the kvmppc_mmu_radix_xlate() function able to translate either an effective address through the process tables or a guest real address through the partition tables. [paulus@ozlabs.org - reduced diffs from previous code] Reviewed-by: David Gibson Signed-off-by: Suraj Jitindar Singh Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_book3s.h | 3 + arch/powerpc/kvm/book3s_64_mmu_radix.c | 109 +++++++++++++++++-------- 2 files changed, 78 insertions(+), 34 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index dd18d8174504..91c977948828 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -188,6 +188,9 @@ extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc); extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned long ea, unsigned long dsisr); +extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, + struct kvmppc_pte *gpte, u64 table, + int table_index, u64 *pte_ret_p); extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite); extern int kvmppc_init_vm_radix(struct kvm *kvm); diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 71951b5a2c68..f2976f456fe0 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -29,83 +29,92 @@ */ static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 }; -int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, - struct kvmppc_pte *gpte, bool data, bool iswrite) +/* + * Used to walk a partition or process table radix tree in guest memory + * Note: We exploit the fact that a partition table and a process + * table have the same layout, a partition-scoped page table and a + * process-scoped page table have the same layout, and the 2nd + * doubleword of a partition table entry has the same layout as + * the PTCR register. + */ +int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, + struct kvmppc_pte *gpte, u64 table, + int table_index, u64 *pte_ret_p) { struct kvm *kvm = vcpu->kvm; - u32 pid; int ret, level, ps; - __be64 prte, rpte; - unsigned long ptbl; - unsigned long root, pte, index; + unsigned long ptbl, root; unsigned long rts, bits, offset; - unsigned long gpa; - unsigned long proc_tbl_size; + unsigned long size, index; + struct prtb_entry entry; + u64 pte, base, gpa; + __be64 rpte; - /* Work out effective PID */ - switch (eaddr >> 62) { - case 0: - pid = vcpu->arch.pid; - break; - case 3: - pid = 0; - break; - default: + if ((table & PRTS_MASK) > 24) return -EINVAL; - } - proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12); - if (pid * 16 >= proc_tbl_size) + size = 1ul << ((table & PRTS_MASK) + 12); + + /* Is the table big enough to contain this entry? 
*/ + if ((table_index * sizeof(entry)) >= size) return -EINVAL; - /* Read partition table to find root of tree for effective PID */ - ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16); - ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte)); + /* Read the table to find the root of the radix tree */ + ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry)); + ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry)); if (ret) return ret; - root = be64_to_cpu(prte); + /* Root is stored in the first double word */ + root = be64_to_cpu(entry.prtb0); rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) | ((root & RTS2_MASK) >> RTS2_SHIFT); bits = root & RPDS_MASK; - root = root & RPDB_MASK; + base = root & RPDB_MASK; offset = rts + 31; - /* current implementations only support 52-bit space */ + /* Current implementations only support 52-bit space */ if (offset != 52) return -EINVAL; + /* Walk each level of the radix tree */ for (level = 3; level >= 0; --level) { + /* Check a valid size */ if (level && bits != p9_supported_radix_bits[level]) return -EINVAL; if (level == 0 && !(bits == 5 || bits == 9)) return -EINVAL; offset -= bits; index = (eaddr >> offset) & ((1UL << bits) - 1); - /* check that low bits of page table base are zero */ - if (root & ((1UL << (bits + 3)) - 1)) + /* Check that low bits of page table base are zero */ + if (base & ((1UL << (bits + 3)) - 1)) return -EINVAL; - ret = kvm_read_guest(kvm, root + index * 8, + /* Read the entry from guest memory */ + ret = kvm_read_guest(kvm, base + (index * sizeof(rpte)), &rpte, sizeof(rpte)); if (ret) return ret; pte = __be64_to_cpu(rpte); if (!(pte & _PAGE_PRESENT)) return -ENOENT; + /* Check if a leaf entry */ if (pte & _PAGE_PTE) break; - bits = pte & 0x1f; - root = pte & 0x0fffffffffffff00ul; + /* Get ready to walk the next level */ + base = pte & RPDB_MASK; + bits = pte & RPDS_MASK; } - /* need a leaf at lowest level; 512GB pages not supported */ + + /* Need a leaf at lowest level; 512GB pages not supported */ if (level < 0 || level == 3) return -EINVAL; - /* offset is now log base 2 of the page size */ + /* We found a valid leaf PTE */ + /* Offset is now log base 2 of the page size */ gpa = pte & 0x01fffffffffff000ul; if (gpa & ((1ul << offset) - 1)) return -EINVAL; - gpa += eaddr & ((1ul << offset) - 1); + gpa |= eaddr & ((1ul << offset) - 1); for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps) if (offset == mmu_psize_defs[ps].shift) break; @@ -118,6 +127,38 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, gpte->may_read = !!(pte & _PAGE_READ); gpte->may_write = !!(pte & _PAGE_WRITE); gpte->may_execute = !!(pte & _PAGE_EXEC); + + if (pte_ret_p) + *pte_ret_p = pte; + + return 0; +} + +int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, + struct kvmppc_pte *gpte, bool data, bool iswrite) +{ + u32 pid; + u64 pte; + int ret; + + /* Work out effective PID */ + switch (eaddr >> 62) { + case 0: + pid = vcpu->arch.pid; + break; + case 3: + pid = 0; + break; + default: + return -EINVAL; + } + + ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte, + vcpu->kvm->arch.process_table, pid, &pte); + if (ret) + return ret; + + /* Check privilege (applies only to process scoped translations) */ if (kvmppc_get_msr(vcpu) & MSR_PR) { if (pte & _PAGE_PRIVILEGED) { gpte->may_read = 0; From 04bae9d5b4c09ac7e9a4941c0e555f7e5e335efd Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Mon, 8 Oct 2018 16:31:01 +1100 Subject: [PATCH 076/204] KVM: PPC: Book3S HV: Refactor radix page fault handler The radix page fault 
handler accounts for all cases, including just needing to insert a pte. This breaks it up into separate functions for the two main cases; setting rc and inserting a pte. This allows us to make the setting of rc and inserting of a pte generic for any pgtable, not specific to the one for this guest. [paulus@ozlabs.org - reduced diffs from previous code] Reviewed-by: David Gibson Signed-off-by: Suraj Jitindar Singh Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_64_mmu_radix.c | 210 +++++++++++++++---------- 1 file changed, 123 insertions(+), 87 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index f2976f456fe0..47f2b1855593 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -400,8 +400,9 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, */ #define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)) -static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, - unsigned int level, unsigned long mmu_seq) +static int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, + unsigned long gpa, unsigned int level, + unsigned long mmu_seq) { pgd_t *pgd; pud_t *pud, *new_pud = NULL; @@ -410,7 +411,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, int ret; /* Traverse the guest's 2nd-level tree, allocate new levels needed */ - pgd = kvm->arch.pgtable + pgd_index(gpa); + pgd = pgtable + pgd_index(gpa); pud = NULL; if (pgd_present(*pgd)) pud = pud_offset(pgd, gpa); @@ -565,95 +566,49 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, return ret; } -int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned long ea, unsigned long dsisr) +static bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, + bool writing, unsigned long gpa) +{ + unsigned long pgflags; + unsigned int shift; + pte_t *ptep; + + /* + * Need to set an R or C bit in the 2nd-level tables; + * since we are just helping out the hardware here, + * it is sufficient to do what the hardware does. + */ + pgflags = _PAGE_ACCESSED; + if (writing) + pgflags |= _PAGE_DIRTY; + /* + * We are walking the secondary (partition-scoped) page table here. + * We can do this without disabling irq because the Linux MM + * subsystem doesn't do THP splits and collapses on this tree. 
+ */ + ptep = __find_linux_pte(pgtable, gpa, NULL, &shift); + if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) { + kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift); + return true; + } + return false; +} + +static int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, + unsigned long gpa, + struct kvm_memory_slot *memslot, + bool writing, bool kvm_ro, + pte_t *inserted_pte, unsigned int *levelp) { struct kvm *kvm = vcpu->kvm; - unsigned long mmu_seq; - unsigned long gpa, gfn, hva; - struct kvm_memory_slot *memslot; struct page *page = NULL; - long ret; - bool writing; + unsigned long mmu_seq; + unsigned long hva, gfn = gpa >> PAGE_SHIFT; bool upgrade_write = false; bool *upgrade_p = &upgrade_write; pte_t pte, *ptep; - unsigned long pgflags; unsigned int shift, level; - - /* Check for unusual errors */ - if (dsisr & DSISR_UNSUPP_MMU) { - pr_err("KVM: Got unsupported MMU fault\n"); - return -EFAULT; - } - if (dsisr & DSISR_BADACCESS) { - /* Reflect to the guest as DSI */ - pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr); - kvmppc_core_queue_data_storage(vcpu, ea, dsisr); - return RESUME_GUEST; - } - - /* Translate the logical address and get the page */ - gpa = vcpu->arch.fault_gpa & ~0xfffUL; - gpa &= ~0xF000000000000000ul; - gfn = gpa >> PAGE_SHIFT; - if (!(dsisr & DSISR_PRTABLE_FAULT)) - gpa |= ea & 0xfff; - memslot = gfn_to_memslot(kvm, gfn); - - /* No memslot means it's an emulated MMIO region */ - if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { - if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS | - DSISR_SET_RC)) { - /* - * Bad address in guest page table tree, or other - * unusual error - reflect it to the guest as DSI. - */ - kvmppc_core_queue_data_storage(vcpu, ea, dsisr); - return RESUME_GUEST; - } - return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, - dsisr & DSISR_ISSTORE); - } - - writing = (dsisr & DSISR_ISSTORE) != 0; - if (memslot->flags & KVM_MEM_READONLY) { - if (writing) { - /* give the guest a DSI */ - dsisr = DSISR_ISSTORE | DSISR_PROTFAULT; - kvmppc_core_queue_data_storage(vcpu, ea, dsisr); - return RESUME_GUEST; - } - upgrade_p = NULL; - } - - if (dsisr & DSISR_SET_RC) { - /* - * Need to set an R or C bit in the 2nd-level tables; - * since we are just helping out the hardware here, - * it is sufficient to do what the hardware does. - */ - pgflags = _PAGE_ACCESSED; - if (writing) - pgflags |= _PAGE_DIRTY; - /* - * We are walking the secondary page table here. We can do this - * without disabling irq. - */ - spin_lock(&kvm->mmu_lock); - ptep = __find_linux_pte(kvm->arch.pgtable, - gpa, NULL, &shift); - if (ptep && pte_present(*ptep) && - (!writing || pte_write(*ptep))) { - kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, - gpa, shift); - dsisr &= ~DSISR_SET_RC; - } - spin_unlock(&kvm->mmu_lock); - if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE | - DSISR_PROTFAULT | DSISR_SET_RC))) - return RESUME_GUEST; - } + int ret; /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_notifier_seq; @@ -666,7 +621,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, * is that the page is writable. 
*/ hva = gfn_to_hva_memslot(memslot, gfn); - if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) { + if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) { upgrade_write = true; } else { unsigned long pfn; @@ -724,7 +679,12 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, } /* Allocate space in the tree and write the PTE */ - ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq); + ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level, + mmu_seq); + if (inserted_pte) + *inserted_pte = pte; + if (levelp) + *levelp = level; if (page) { if (!ret && (pte_val(pte) & _PAGE_WRITE)) @@ -732,6 +692,82 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, put_page(page); } + return ret; +} + +int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned long ea, unsigned long dsisr) +{ + struct kvm *kvm = vcpu->kvm; + unsigned long gpa, gfn; + struct kvm_memory_slot *memslot; + long ret; + bool writing = !!(dsisr & DSISR_ISSTORE); + bool kvm_ro = false; + + /* Check for unusual errors */ + if (dsisr & DSISR_UNSUPP_MMU) { + pr_err("KVM: Got unsupported MMU fault\n"); + return -EFAULT; + } + if (dsisr & DSISR_BADACCESS) { + /* Reflect to the guest as DSI */ + pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr); + kvmppc_core_queue_data_storage(vcpu, ea, dsisr); + return RESUME_GUEST; + } + + /* Translate the logical address */ + gpa = vcpu->arch.fault_gpa & ~0xfffUL; + gpa &= ~0xF000000000000000ul; + gfn = gpa >> PAGE_SHIFT; + if (!(dsisr & DSISR_PRTABLE_FAULT)) + gpa |= ea & 0xfff; + + /* Get the corresponding memslot */ + memslot = gfn_to_memslot(kvm, gfn); + + /* No memslot means it's an emulated MMIO region */ + if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { + if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS | + DSISR_SET_RC)) { + /* + * Bad address in guest page table tree, or other + * unusual error - reflect it to the guest as DSI. + */ + kvmppc_core_queue_data_storage(vcpu, ea, dsisr); + return RESUME_GUEST; + } + return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing); + } + + if (memslot->flags & KVM_MEM_READONLY) { + if (writing) { + /* give the guest a DSI */ + kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE | + DSISR_PROTFAULT); + return RESUME_GUEST; + } + kvm_ro = true; + } + + /* Failed to set the reference/change bits */ + if (dsisr & DSISR_SET_RC) { + spin_lock(&kvm->mmu_lock); + if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, + writing, gpa)) + dsisr &= ~DSISR_SET_RC; + spin_unlock(&kvm->mmu_lock); + + if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE | + DSISR_PROTFAULT | DSISR_SET_RC))) + return RESUME_GUEST; + } + + /* Try to insert a pte */ + ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing, + kvm_ro, NULL, NULL); + if (ret == 0 || ret == -EAGAIN) ret = RESUME_GUEST; return ret; From f0f825f0e20abe9ba6eb71b7c1eebd891fb4375c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:31:02 +1100 Subject: [PATCH 077/204] KVM: PPC: Book3S HV: Use kvmppc_unmap_pte() in kvm_unmap_radix() kvmppc_unmap_pte() does a sequence of operations that are open-coded in kvm_unmap_radix(). This extends kvmppc_unmap_pte() a little so that it can be used by kvm_unmap_radix(), and makes kvm_unmap_radix() call it. 
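The two extensions are small: the page size used for dirty tracking is derived from the
mapping's shift instead of being recomputed by the caller, and the memslot becomes an
optional argument with a lookup fallback. A toy stand-alone sketch of that shape follows;
every name in it is illustrative, not the kernel's.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct memslot { const char *name; };

/* Stand-in for a gfn_to_memslot()-style lookup, used only when the
 * caller could not supply the slot it already holds.
 */
static struct memslot *lookup_slot(unsigned long gfn)
{
	static struct memslot slot = { "fallback-slot" };
	(void)gfn;
	return &slot;
}

/* Shape of the extended unmap helper: page size comes from the
 * mapping's shift (0 means a normal small page), and the memslot is
 * optional -- the two changes the diff below makes.
 */
static void unmap_one(unsigned long gfn, unsigned int shift,
		      struct memslot *slot)
{
	unsigned long page_size = shift ? (1UL << shift) : PAGE_SIZE;

	if (!slot)
		slot = lookup_slot(gfn);
	printf("unmapped gfn %#lx: %lu bytes marked dirty in %s\n",
	       gfn, page_size, slot->name);
}

int main(void)
{
	struct memslot mine = { "callers-slot" };

	unmap_one(0x100, 0, &mine);   /* caller already has the slot  */
	unmap_one(0x200, 21, NULL);   /* 2MB mapping, slot looked up  */
	return 0;
}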
Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_64_mmu_radix.c | 33 ++++++++++---------------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 47f2b1855593..bd06a955d190 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -240,19 +240,22 @@ static void kvmppc_pmd_free(pmd_t *pmdp) } static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, - unsigned long gpa, unsigned int shift) + unsigned long gpa, unsigned int shift, + struct kvm_memory_slot *memslot) { - unsigned long page_size = 1ul << shift; unsigned long old; old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift); kvmppc_radix_tlbie_page(kvm, gpa, shift); if (old & _PAGE_DIRTY) { unsigned long gfn = gpa >> PAGE_SHIFT; - struct kvm_memory_slot *memslot; + unsigned long page_size = PAGE_SIZE; - memslot = gfn_to_memslot(kvm, gfn); + if (shift) + page_size = 1ul << shift; + if (!memslot) + memslot = gfn_to_memslot(kvm, gfn); if (memslot && memslot->dirty_bitmap) kvmppc_update_dirty_map(memslot, gfn, page_size); } @@ -282,7 +285,7 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full) WARN_ON_ONCE(1); kvmppc_unmap_pte(kvm, p, pte_pfn(*p) << PAGE_SHIFT, - PAGE_SHIFT); + PAGE_SHIFT, NULL); } } @@ -304,7 +307,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full) WARN_ON_ONCE(1); kvmppc_unmap_pte(kvm, (pte_t *)p, pte_pfn(*(pte_t *)p) << PAGE_SHIFT, - PMD_SHIFT); + PMD_SHIFT, NULL); } } else { pte_t *pte; @@ -468,7 +471,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, goto out_unlock; } /* Valid 1GB page here already, remove it */ - kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT); + kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL); } if (level == 2) { if (!pud_none(*pud)) { @@ -517,7 +520,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, goto out_unlock; } /* Valid 2MB page here already, remove it */ - kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT); + kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL); } if (level == 1) { if (!pmd_none(*pmd)) { @@ -780,20 +783,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, pte_t *ptep; unsigned long gpa = gfn << PAGE_SHIFT; unsigned int shift; - unsigned long old; ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); - if (ptep && pte_present(*ptep)) { - old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0, - gpa, shift); - kvmppc_radix_tlbie_page(kvm, gpa, shift); - if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) { - unsigned long psize = PAGE_SIZE; - if (shift) - psize = 1ul << shift; - kvmppc_update_dirty_map(memslot, gfn, psize); - } - } + if (ptep && pte_present(*ptep)) + kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot); return 0; } From 8e3f5fc1045dc49fd175b978c5457f5f51e7a2ce Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:31:03 +1100 Subject: [PATCH 078/204] KVM: PPC: Book3S HV: Framework and hcall stubs for nested virtualization This starts the process of adding the code to support nested HV-style virtualization. It defines a new H_SET_PARTITION_TABLE hypercall which a nested hypervisor can use to set the base address and size of a partition table in its memory (analogous to the PTCR register). 
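The PTCR-style encoding packs both pieces of information into one doubleword: the low
field holds log_2(table size in bytes) - 12, the remaining bits hold the real address of
the table, and each table entry is two doublewords (16 bytes). A small decoding sketch;
the mask values here are assumptions for illustration only, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout for this sketch: low 5 bits = size field,
 * 4K-aligned remainder = base address.
 */
#define SKETCH_PRTS_MASK 0x1fUL
#define SKETCH_PRTB_MASK (~0xfffUL)

struct ptbl_desc {
	uint64_t base;          /* real address of the table      */
	uint64_t size;          /* table size in bytes            */
	uint64_t nentries;      /* 16-byte entries in the table   */
};

static struct ptbl_desc decode_ptcr(uint64_t ptcr)
{
	struct ptbl_desc d;

	d.base = ptcr & SKETCH_PRTB_MASK;
	d.size = 1ULL << ((ptcr & SKETCH_PRTS_MASK) + 12);
	d.nentries = d.size / 16;   /* two doublewords per entry */
	return d;
}

int main(void)
{
	/* A 64KB table at a made-up address 0x1f0000: log2(64K) - 12 = 4,
	 * which gives the 4096-entry limit enforced by the new hcall.
	 */
	struct ptbl_desc d = decode_ptcr(0x1f0000ULL | 4);

	printf("base %#llx, size %llu bytes, %llu entries\n",
	       (unsigned long long)d.base,
	       (unsigned long long)d.size,
	       (unsigned long long)d.nentries);
	return 0;
}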
On the host (level 0 hypervisor) side, the H_SET_PARTITION_TABLE hypercall from the guest is handled by code that saves the virtual PTCR value for the guest. This also adds code for creating and destroying nested guests and for reading the partition table entry for a nested guest from L1 memory. Each nested guest has its own shadow LPID value, different in general from the LPID value used by the nested hypervisor to refer to it. The shadow LPID value is allocated at nested guest creation time. Nested hypervisor functionality is only available for a radix guest, which therefore means a radix host on a POWER9 (or later) processor. Signed-off-by: Paul Mackerras Reviewed-by: David Gibson Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/hvcall.h | 5 + arch/powerpc/include/asm/kvm_book3s.h | 10 +- arch/powerpc/include/asm/kvm_book3s_64.h | 33 +++ arch/powerpc/include/asm/kvm_book3s_asm.h | 3 + arch/powerpc/include/asm/kvm_host.h | 5 + arch/powerpc/kvm/Makefile | 3 +- arch/powerpc/kvm/book3s_hv.c | 31 ++- arch/powerpc/kvm/book3s_hv_nested.c | 301 ++++++++++++++++++++++ 8 files changed, 384 insertions(+), 7 deletions(-) create mode 100644 arch/powerpc/kvm/book3s_hv_nested.c diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index a0b17f9f1ea4..c95c6518bf27 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -322,6 +322,11 @@ #define H_GET_24X7_DATA 0xF07C #define H_GET_PERF_COUNTER_INFO 0xF080 +/* Platform-specific hcalls used for nested HV KVM */ +#define H_SET_PARTITION_TABLE 0xF800 +#define H_ENTER_NESTED 0xF804 +#define H_TLB_INVALIDATE 0xF808 + /* Values for 2nd argument to H_SET_MODE */ #define H_SET_MODE_RESOURCE_SET_CIABR 1 #define H_SET_MODE_RESOURCE_SET_DAWR 2 diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 91c977948828..43f212e38b89 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -274,6 +274,13 @@ static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {} static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {} #endif +long kvmhv_nested_init(void); +void kvmhv_nested_exit(void); +void kvmhv_vm_nested_init(struct kvm *kvm); +long kvmhv_set_partition_table(struct kvm_vcpu *vcpu); +void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1); +void kvmhv_release_all_nested(struct kvm *kvm); + void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); extern int kvm_irq_bypass; @@ -387,9 +394,6 @@ extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu); /* TO = 31 for unconditional trap */ #define INS_TW 0x7fe00008 -/* LPIDs we support with this build -- runtime limit may be lower */ -#define KVMPPC_NR_LPIDS (LPID_RSVD + 1) - #define SPLIT_HACK_MASK 0xff000000 #define SPLIT_HACK_OFFS 0xfb000000 diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 5c0e2d9a7e15..6d67b6a9e784 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -23,6 +23,39 @@ #include #include #include +#include + +#ifdef CONFIG_PPC_PSERIES +static inline bool kvmhv_on_pseries(void) +{ + return !cpu_has_feature(CPU_FTR_HVMODE); +} +#else +static inline bool kvmhv_on_pseries(void) +{ + return false; +} +#endif + +/* + * Structure for a nested guest, that is, for a guest that is managed by + * one of our guests. 
+ */ +struct kvm_nested_guest { + struct kvm *l1_host; /* L1 VM that owns this nested guest */ + int l1_lpid; /* lpid L1 guest thinks this guest is */ + int shadow_lpid; /* real lpid of this nested guest */ + pgd_t *shadow_pgtable; /* our page table for this guest */ + u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */ + u64 process_table; /* process table entry for this guest */ + long refcnt; /* number of pointers to this struct */ + struct mutex tlb_lock; /* serialize page faults and tlbies */ + struct kvm_nested_guest *next; +}; + +struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, + bool create); +void kvmhv_put_nested(struct kvm_nested_guest *gp); /* Power architecture requires HPT is at least 256kiB, at most 64TiB */ #define PPC_MIN_HPT_ORDER 18 diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index d978fdf698af..eb3ba6390108 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -25,6 +25,9 @@ #define XICS_MFRR 0xc #define XICS_IPI 2 /* interrupt source # for IPIs */ +/* LPIDs we support with this build -- runtime limit may be lower */ +#define KVMPPC_NR_LPIDS (LPID_RSVD + 1) + /* Maximum number of threads per physical core */ #define MAX_SMT_THREADS 8 diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index c9cc42f73b3c..c35d4f2c4d90 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -46,6 +46,7 @@ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE #include /* for MAX_SMT_THREADS */ #define KVM_MAX_VCPU_ID (MAX_SMT_THREADS * KVM_MAX_VCORES) +#define KVM_MAX_NESTED_GUESTS KVMPPC_NR_LPIDS #else #define KVM_MAX_VCPU_ID KVM_MAX_VCPUS @@ -287,6 +288,7 @@ struct kvm_arch { u8 radix; u8 fwnmi_enabled; bool threads_indep; + bool nested_enable; pgd_t *pgtable; u64 process_table; struct dentry *debugfs_dir; @@ -312,6 +314,9 @@ struct kvm_arch { #endif struct kvmppc_ops *kvm_ops; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + u64 l1_ptcr; + int max_nested_lpid; + struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS]; /* This array can grow quite large, keep it at the end */ struct kvmppc_vcore *vcores[KVM_MAX_VCORES]; #endif diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index f872c04bb5b1..e814f40ab836 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -75,7 +75,8 @@ kvm-hv-y += \ book3s_hv.o \ book3s_hv_interrupts.o \ book3s_64_mmu_hv.o \ - book3s_64_mmu_radix.o + book3s_64_mmu_radix.o \ + book3s_hv_nested.o kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \ book3s_hv_tm.o diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8c20a90a6851..d8fc49effab0 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -934,6 +934,19 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) if (ret == H_TOO_HARD) return RESUME_HOST; break; + + case H_SET_PARTITION_TABLE: + ret = H_FUNCTION; + if (vcpu->kvm->arch.nested_enable) + ret = kvmhv_set_partition_table(vcpu); + break; + case H_ENTER_NESTED: + ret = H_FUNCTION; + break; + case H_TLB_INVALIDATE: + ret = H_FUNCTION; + break; + default: return RESUME_HOST; } @@ -4157,8 +4170,7 @@ void kvmppc_setup_partition_table(struct kvm *kvm) __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; dw1 = PATB_GR | kvm->arch.process_table; } - - mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1); + kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); } /* @@ -4254,6 +4266,10 @@ static int 
kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm) { + if (kvm->arch.nested_enable) { + kvm->arch.nested_enable = false; + kvmhv_release_all_nested(kvm); + } kvmppc_free_radix(kvm); kvmppc_update_lpcr(kvm, LPCR_VPM1, LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR); @@ -4374,6 +4390,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) kvmppc_alloc_host_rm_ops(); + kvmhv_vm_nested_init(kvm); + /* * Since we don't flush the TLB when tearing down a VM, * and this lpid might have previously been used, @@ -4517,8 +4535,10 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) /* Perform global invalidation and return lpid to the pool */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { + if (kvm->arch.nested_enable) + kvmhv_release_all_nested(kvm); kvm->arch.process_table = 0; - kvmppc_setup_partition_table(kvm); + kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); } kvmppc_free_lpid(kvm->arch.lpid); @@ -4989,6 +5009,10 @@ static int kvmppc_book3s_init_hv(void) if (r < 0) return -ENODEV; + r = kvmhv_nested_init(); + if (r) + return r; + r = kvm_init_subcore_bitmap(); if (r) return r; @@ -5047,6 +5071,7 @@ static void kvmppc_book3s_exit_hv(void) if (kvmppc_radix_possible()) kvmppc_radix_exit(); kvmppc_hv_ops = NULL; + kvmhv_nested_exit(); } module_init(kvmppc_book3s_init_hv); diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c new file mode 100644 index 000000000000..327826248314 --- /dev/null +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corporation, 2018 + * Authors Suraj Jitindar Singh + * Paul Mackerras + * + * Description: KVM functions specific to running nested KVM-HV guests + * on Book3S processors (specifically POWER9 and later). + */ + +#include +#include + +#include +#include +#include +#include + +static struct patb_entry *pseries_partition_tb; + +static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp); + +long kvmhv_nested_init(void) +{ + long int ptb_order; + unsigned long ptcr; + long rc; + + if (!kvmhv_on_pseries()) + return 0; + if (!radix_enabled()) + return -ENODEV; + + /* find log base 2 of KVMPPC_NR_LPIDS, rounding up */ + ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1; + if (ptb_order < 8) + ptb_order = 8; + pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order, + GFP_KERNEL); + if (!pseries_partition_tb) { + pr_err("kvm-hv: failed to allocated nested partition table\n"); + return -ENOMEM; + } + + ptcr = __pa(pseries_partition_tb) | (ptb_order - 8); + rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr); + if (rc != H_SUCCESS) { + pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n", + rc); + kfree(pseries_partition_tb); + pseries_partition_tb = NULL; + return -ENODEV; + } + + return 0; +} + +void kvmhv_nested_exit(void) +{ + /* + * N.B. the kvmhv_on_pseries() test is there because it enables + * the compiler to remove the call to plpar_hcall_norets() + * when CONFIG_PPC_PSERIES=n. 
+ */ + if (kvmhv_on_pseries() && pseries_partition_tb) { + plpar_hcall_norets(H_SET_PARTITION_TABLE, 0); + kfree(pseries_partition_tb); + pseries_partition_tb = NULL; + } +} + +void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1) +{ + if (cpu_has_feature(CPU_FTR_HVMODE)) { + mmu_partition_table_set_entry(lpid, dw0, dw1); + } else { + pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0); + pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1); + } +} + +static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp) +{ + unsigned long dw0; + + dw0 = PATB_HR | radix__get_tree_size() | + __pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE; + kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table); +} + +void kvmhv_vm_nested_init(struct kvm *kvm) +{ + kvm->arch.max_nested_lpid = -1; +} + +/* + * Handle the H_SET_PARTITION_TABLE hcall. + * r4 = guest real address of partition table + log_2(size) - 12 + * (formatted as for the PTCR). + */ +long kvmhv_set_partition_table(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + unsigned long ptcr = kvmppc_get_gpr(vcpu, 4); + int srcu_idx; + long ret = H_SUCCESS; + + srcu_idx = srcu_read_lock(&kvm->srcu); + /* + * Limit the partition table to 4096 entries (because that's what + * hardware supports), and check the base address. + */ + if ((ptcr & PRTS_MASK) > 12 - 8 || + !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT)) + ret = H_PARAMETER; + srcu_read_unlock(&kvm->srcu, srcu_idx); + if (ret == H_SUCCESS) + kvm->arch.l1_ptcr = ptcr; + return ret; +} + +/* + * Reload the partition table entry for a guest. + * Caller must hold gp->tlb_lock. + */ +static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp) +{ + int ret; + struct patb_entry ptbl_entry; + unsigned long ptbl_addr; + struct kvm *kvm = gp->l1_host; + + ret = -EFAULT; + ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4); + if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) + ret = kvm_read_guest(kvm, ptbl_addr, + &ptbl_entry, sizeof(ptbl_entry)); + if (ret) { + gp->l1_gr_to_hr = 0; + gp->process_table = 0; + } else { + gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0); + gp->process_table = be64_to_cpu(ptbl_entry.patb1); + } + kvmhv_set_nested_ptbl(gp); +} + +struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid) +{ + struct kvm_nested_guest *gp; + long shadow_lpid; + + gp = kzalloc(sizeof(*gp), GFP_KERNEL); + if (!gp) + return NULL; + gp->l1_host = kvm; + gp->l1_lpid = lpid; + mutex_init(&gp->tlb_lock); + gp->shadow_pgtable = pgd_alloc(kvm->mm); + if (!gp->shadow_pgtable) + goto out_free; + shadow_lpid = kvmppc_alloc_lpid(); + if (shadow_lpid < 0) + goto out_free2; + gp->shadow_lpid = shadow_lpid; + + return gp; + + out_free2: + pgd_free(kvm->mm, gp->shadow_pgtable); + out_free: + kfree(gp); + return NULL; +} + +/* + * Free up any resources allocated for a nested guest. 
+ */ +static void kvmhv_release_nested(struct kvm_nested_guest *gp) +{ + kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0); + kvmppc_free_lpid(gp->shadow_lpid); + if (gp->shadow_pgtable) + pgd_free(gp->l1_host->mm, gp->shadow_pgtable); + kfree(gp); +} + +static void kvmhv_remove_nested(struct kvm_nested_guest *gp) +{ + struct kvm *kvm = gp->l1_host; + int lpid = gp->l1_lpid; + long ref; + + spin_lock(&kvm->mmu_lock); + if (gp == kvm->arch.nested_guests[lpid]) { + kvm->arch.nested_guests[lpid] = NULL; + if (lpid == kvm->arch.max_nested_lpid) { + while (--lpid >= 0 && !kvm->arch.nested_guests[lpid]) + ; + kvm->arch.max_nested_lpid = lpid; + } + --gp->refcnt; + } + ref = gp->refcnt; + spin_unlock(&kvm->mmu_lock); + if (ref == 0) + kvmhv_release_nested(gp); +} + +/* + * Free up all nested resources allocated for this guest. + * This is called with no vcpus of the guest running, when + * switching the guest to HPT mode or when destroying the + * guest. + */ +void kvmhv_release_all_nested(struct kvm *kvm) +{ + int i; + struct kvm_nested_guest *gp; + struct kvm_nested_guest *freelist = NULL; + + spin_lock(&kvm->mmu_lock); + for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { + gp = kvm->arch.nested_guests[i]; + if (!gp) + continue; + kvm->arch.nested_guests[i] = NULL; + if (--gp->refcnt == 0) { + gp->next = freelist; + freelist = gp; + } + } + kvm->arch.max_nested_lpid = -1; + spin_unlock(&kvm->mmu_lock); + while ((gp = freelist) != NULL) { + freelist = gp->next; + kvmhv_release_nested(gp); + } +} + +/* caller must hold gp->tlb_lock */ +void kvmhv_flush_nested(struct kvm_nested_guest *gp) +{ + kvmhv_update_ptbl_cache(gp); + if (gp->l1_gr_to_hr == 0) + kvmhv_remove_nested(gp); +} + +struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, + bool create) +{ + struct kvm_nested_guest *gp, *newgp; + + if (l1_lpid >= KVM_MAX_NESTED_GUESTS || + l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) + return NULL; + + spin_lock(&kvm->mmu_lock); + gp = kvm->arch.nested_guests[l1_lpid]; + if (gp) + ++gp->refcnt; + spin_unlock(&kvm->mmu_lock); + + if (gp || !create) + return gp; + + newgp = kvmhv_alloc_nested(kvm, l1_lpid); + if (!newgp) + return NULL; + spin_lock(&kvm->mmu_lock); + if (kvm->arch.nested_guests[l1_lpid]) { + /* someone else beat us to it */ + gp = kvm->arch.nested_guests[l1_lpid]; + } else { + kvm->arch.nested_guests[l1_lpid] = newgp; + ++newgp->refcnt; + gp = newgp; + newgp = NULL; + if (l1_lpid > kvm->arch.max_nested_lpid) + kvm->arch.max_nested_lpid = l1_lpid; + } + ++gp->refcnt; + spin_unlock(&kvm->mmu_lock); + + if (newgp) + kvmhv_release_nested(newgp); + + return gp; +} + +void kvmhv_put_nested(struct kvm_nested_guest *gp) +{ + struct kvm *kvm = gp->l1_host; + long ref; + + spin_lock(&kvm->mmu_lock); + ref = --gp->refcnt; + spin_unlock(&kvm->mmu_lock); + if (ref == 0) + kvmhv_release_nested(gp); +} From 360cae313702cdd0b90f82c261a8302fecef030a Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:31:04 +1100 Subject: [PATCH 079/204] KVM: PPC: Book3S HV: Nested guest entry via hypercall This adds a new hypercall, H_ENTER_NESTED, which is used by a nested hypervisor to enter one of its nested guests. The hypercall supplies register values in two structs. Those values are copied by the level 0 (L0) hypervisor (the one which is running in hypervisor mode) into the vcpu struct of the L1 guest, and then the guest is run until an interrupt or error occurs which needs to be reported to L1 via the hypercall return value. 
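From the L1 hypervisor's point of view the call shape is: fill in the two register
blocks, pass their guest-real addresses as the two hypercall arguments, and treat the
return value as the interrupt vector (or error token) that ended the L2 run. The sketch
below only illustrates that shape; the structs and the hypercall stub are cut-down
placeholders, not the layouts and calls defined in this patch.

#include <stdint.h>
#include <stdio.h>

/* Cut-down stand-ins for the two argument blocks; the real layouts
 * (struct hv_guest_state and struct pt_regs) appear later in this
 * patch, and the real call passes their guest-real addresses.
 */
struct hv_state { uint64_t version, lpid, vcpu_token, lpcr, hdec_expiry; };
struct l2_regs  { uint64_t gpr[32], nip, msr; };

/* Placeholder for plpar_hcall_norets(H_ENTER_NESTED, ...); the return
 * value is the interrupt vector that stopped L2, or an error token
 * (e.g. H_PARAMETER) if the argument blocks were rejected.
 */
static long h_enter_nested(struct hv_state *hv, struct l2_regs *gr)
{
	(void)hv; (void)gr;
	return 0x500;            /* pretend L2 stopped on an external irq */
}

int main(void)
{
	struct hv_state hv = { .version = 1, .lpid = 1, .vcpu_token = 0 };
	struct l2_regs gr = { .nip = 0x100, .msr = 0 };
	long trap;

	/* L1's view: hand both blocks to L0, get back the reason L2
	 * stopped; L0 has updated both blocks in place for the return.
	 */
	trap = h_enter_nested(&hv, &gr);
	printf("L2 exited with trap vector %#lx\n", (unsigned long)trap);
	return 0;
}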
Currently this assumes that the L0 and L1 hypervisors are the same endianness, and the structs passed as arguments are in native endianness. If they are of different endianness, the version number check will fail and the hcall will be rejected. Nested hypervisors do not support indep_threads_mode=N, so this adds code to print a warning message if the administrator has set indep_threads_mode=N, and treat it as Y. Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/hvcall.h | 36 ++++ arch/powerpc/include/asm/kvm_book3s.h | 7 + arch/powerpc/include/asm/kvm_host.h | 5 + arch/powerpc/kernel/asm-offsets.c | 1 + arch/powerpc/kvm/book3s_hv.c | 214 ++++++++++++++++++---- arch/powerpc/kvm/book3s_hv_nested.c | 230 ++++++++++++++++++++++++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 8 + 7 files changed, 471 insertions(+), 30 deletions(-) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index c95c6518bf27..45e8789bb770 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -466,6 +466,42 @@ struct h_cpu_char_result { u64 behaviour; }; +/* Register state for entering a nested guest with H_ENTER_NESTED */ +struct hv_guest_state { + u64 version; /* version of this structure layout */ + u32 lpid; + u32 vcpu_token; + /* These registers are hypervisor privileged (at least for writing) */ + u64 lpcr; + u64 pcr; + u64 amor; + u64 dpdes; + u64 hfscr; + s64 tb_offset; + u64 dawr0; + u64 dawrx0; + u64 ciabr; + u64 hdec_expiry; + u64 purr; + u64 spurr; + u64 ic; + u64 vtb; + u64 hdar; + u64 hdsisr; + u64 heir; + u64 asdr; + /* These are OS privileged but need to be set late in guest entry */ + u64 srr0; + u64 srr1; + u64 sprg[4]; + u64 pidr; + u64 cfar; + u64 ppr; +}; + +/* Latest version of hv_guest_state structure */ +#define HV_GUEST_STATE_VERSION 1 + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HVCALL_H */ diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 43f212e38b89..093fd700da32 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -280,6 +280,13 @@ void kvmhv_vm_nested_init(struct kvm *kvm); long kvmhv_set_partition_table(struct kvm_vcpu *vcpu); void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1); void kvmhv_release_all_nested(struct kvm *kvm); +long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu); +int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu, + u64 time_limit, unsigned long lpcr); +void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr); +void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu, + struct hv_guest_state *hr); +long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu); void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index c35d4f2c4d90..ceb9f20a0b24 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -95,6 +95,7 @@ struct dtl_entry; struct kvmppc_vcpu_book3s; struct kvmppc_book3s_shadow_vcpu; +struct kvm_nested_guest; struct kvm_vm_stat { ulong remote_tlb_flush; @@ -786,6 +787,10 @@ struct kvm_vcpu_arch { u32 emul_inst; u32 online; + + /* For support of nested guests */ + struct kvm_nested_guest *nested; + u32 nested_vcpu_id; #endif #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c 
index 7c3738d890e8..d0abcbbdc700 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -503,6 +503,7 @@ int main(void) OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr); OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty); OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst); + OFFSET(VCPU_NESTED, kvm_vcpu, arch.nested); OFFSET(VCPU_CPU, kvm_vcpu, cpu); OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu); #endif diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index d8fc49effab0..7a2f344af4f8 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -942,6 +942,13 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) break; case H_ENTER_NESTED: ret = H_FUNCTION; + if (!vcpu->kvm->arch.nested_enable) + break; + ret = kvmhv_enter_nested_guest(vcpu); + if (ret == H_INTERRUPT) { + kvmppc_set_gpr(vcpu, 3, 0); + return -EINTR; + } break; case H_TLB_INVALIDATE: ret = H_FUNCTION; @@ -1272,6 +1279,104 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, return r; } +static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) +{ + int r; + int srcu_idx; + + vcpu->stat.sum_exits++; + + /* + * This can happen if an interrupt occurs in the last stages + * of guest entry or the first stages of guest exit (i.e. after + * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV + * and before setting it to KVM_GUEST_MODE_HOST_HV). + * That can happen due to a bug, or due to a machine check + * occurring at just the wrong time. + */ + if (vcpu->arch.shregs.msr & MSR_HV) { + pr_emerg("KVM trap in HV mode while nested!\n"); + pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n", + vcpu->arch.trap, kvmppc_get_pc(vcpu), + vcpu->arch.shregs.msr); + kvmppc_dump_regs(vcpu); + return RESUME_HOST; + } + switch (vcpu->arch.trap) { + /* We're good on these - the host merely wanted to get our attention */ + case BOOK3S_INTERRUPT_HV_DECREMENTER: + vcpu->stat.dec_exits++; + r = RESUME_GUEST; + break; + case BOOK3S_INTERRUPT_EXTERNAL: + vcpu->stat.ext_intr_exits++; + r = RESUME_HOST; + break; + case BOOK3S_INTERRUPT_H_DOORBELL: + case BOOK3S_INTERRUPT_H_VIRT: + vcpu->stat.ext_intr_exits++; + r = RESUME_GUEST; + break; + /* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/ + case BOOK3S_INTERRUPT_HMI: + case BOOK3S_INTERRUPT_PERFMON: + case BOOK3S_INTERRUPT_SYSTEM_RESET: + r = RESUME_GUEST; + break; + case BOOK3S_INTERRUPT_MACHINE_CHECK: + /* Pass the machine check to the L1 guest */ + r = RESUME_HOST; + /* Print the MCE event to host console. */ + machine_check_print_event_info(&vcpu->arch.mce_evt, false); + break; + /* + * We get these next two if the guest accesses a page which it thinks + * it has mapped but which is not actually present, either because + * it is for an emulated I/O device or because the corresonding + * host page has been paged out. 
+ */ + case BOOK3S_INTERRUPT_H_DATA_STORAGE: + srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + r = kvmhv_nested_page_fault(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); + break; + case BOOK3S_INTERRUPT_H_INST_STORAGE: + vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); + vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & + DSISR_SRR1_MATCH_64S; + if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) + vcpu->arch.fault_dsisr |= DSISR_ISSTORE; + srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + r = kvmhv_nested_page_fault(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); + break; + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + case BOOK3S_INTERRUPT_HV_SOFTPATCH: + /* + * This occurs for various TM-related instructions that + * we need to emulate on POWER9 DD2.2. We have already + * handled the cases where the guest was in real-suspend + * mode and was transitioning to transactional state. + */ + r = kvmhv_p9_tm_emulation(vcpu); + break; +#endif + + case BOOK3S_INTERRUPT_HV_RM_HARD: + vcpu->arch.trap = 0; + r = RESUME_GUEST; + if (!xive_enabled()) + kvmppc_xics_rm_complete(vcpu, 0); + break; + default: + r = RESUME_HOST; + break; + } + + return r; +} + static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { @@ -3098,7 +3203,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) /* * Load up hypervisor-mode registers on P9. */ -static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit) +static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, + unsigned long lpcr) { struct kvmppc_vcore *vc = vcpu->arch.vcore; s64 hdec; @@ -3155,7 +3261,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit) mtspr(SPRN_AMOR, ~0UL); - mtspr(SPRN_LPCR, vc->lpcr); + mtspr(SPRN_LPCR, lpcr); isync(); kvmppc_xive_push_vcpu(vcpu); @@ -3225,7 +3331,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit) * Virtual-mode guest entry for POWER9 and later when the host and * guest are both using the radix MMU. The LPIDR has already been set. 
*/ -int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit) +int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, + unsigned long lpcr) { struct kvmppc_vcore *vc = vcpu->arch.vcore; unsigned long host_dscr = mfspr(SPRN_DSCR); @@ -3291,13 +3398,31 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit) mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb()); - if (vcpu->arch.doorbell_request) { - vc->dpdes = 1; - smp_wmb(); - vcpu->arch.doorbell_request = 0; - } + if (kvmhv_on_pseries()) { + /* call our hypervisor to load up HV regs and go */ + struct hv_guest_state hvregs; - trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit); + kvmhv_save_hv_regs(vcpu, &hvregs); + hvregs.lpcr = lpcr; + vcpu->arch.regs.msr = vcpu->arch.shregs.msr; + hvregs.version = HV_GUEST_STATE_VERSION; + if (vcpu->arch.nested) { + hvregs.lpid = vcpu->arch.nested->shadow_lpid; + hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; + } else { + hvregs.lpid = vcpu->kvm->arch.lpid; + hvregs.vcpu_token = vcpu->vcpu_id; + } + hvregs.hdec_expiry = time_limit; + trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs), + __pa(&vcpu->arch.regs)); + kvmhv_restore_hv_return_state(vcpu, &hvregs); + vcpu->arch.shregs.msr = vcpu->arch.regs.msr; + vcpu->arch.shregs.dar = mfspr(SPRN_DAR); + vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); + } else { + trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr); + } vcpu->arch.slb_max = 0; dec = mfspr(SPRN_DEC); @@ -3539,6 +3664,11 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) trace_kvmppc_vcore_wakeup(do_sleep, block_ns); } +/* + * This never fails for a radix guest, as none of the operations it does + * for a radix guest can fail or have a way to report failure. + * kvmhv_run_single_vcpu() relies on this fact. + */ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu) { int r = 0; @@ -3688,13 +3818,16 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) return vcpu->arch.ret; } -static int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, - struct kvm_vcpu *vcpu, u64 time_limit) +int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, + struct kvm_vcpu *vcpu, u64 time_limit, + unsigned long lpcr) { int trap, r, pcpu, pcpu0; int srcu_idx; struct kvmppc_vcore *vc; struct kvm *kvm = vcpu->kvm; + struct kvm_nested_guest *nested = vcpu->arch.nested; + unsigned long lpid; trace_kvmppc_run_vcpu_enter(vcpu); @@ -3715,16 +3848,8 @@ static int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, vc->runner = vcpu; /* See if the MMU is ready to go */ - if (!kvm->arch.mmu_ready) { - r = kvmhv_setup_mmu(vcpu); - if (r) { - kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; - kvm_run->fail_entry. 
- hardware_entry_failure_reason = 0; - vcpu->arch.ret = r; - goto out; - } - } + if (!kvm->arch.mmu_ready) + kvmhv_setup_mmu(vcpu); if (need_resched()) cond_resched(); @@ -3746,7 +3871,22 @@ static int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready) goto out; - kvmppc_core_prepare_to_enter(vcpu); + if (!nested) { + kvmppc_core_prepare_to_enter(vcpu); + if (vcpu->arch.doorbell_request) { + vc->dpdes = 1; + smp_wmb(); + vcpu->arch.doorbell_request = 0; + } + if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, + &vcpu->arch.pending_exceptions)) + lpcr |= LPCR_MER; + } else if (vcpu->arch.pending_exceptions || + vcpu->arch.doorbell_request || + xive_interrupt_pending(vcpu)) { + vcpu->arch.ret = RESUME_HOST; + goto out; + } kvmppc_clear_host_core(pcpu); @@ -3760,7 +3900,10 @@ static int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, vc->vcore_state = VCORE_RUNNING; trace_kvmppc_run_core(vc, 0); - mtspr(SPRN_LPID, vc->kvm->arch.lpid); + lpid = vc->kvm->arch.lpid; + if (nested) + lpid = nested->shadow_lpid; + mtspr(SPRN_LPID, lpid); isync(); /* See comment above in kvmppc_run_core() about this */ @@ -3769,7 +3912,7 @@ static int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, pcpu0 &= ~0x3UL; if (cpumask_test_cpu(pcpu0, &kvm->arch.need_tlb_flush)) { - radix__local_flush_tlb_lpid_guest(kvm->arch.lpid); + radix__local_flush_tlb_lpid_guest(lpid); /* Clear the bit after the TLB flush */ cpumask_clear_cpu(pcpu0, &kvm->arch.need_tlb_flush); } @@ -3781,7 +3924,7 @@ static int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, this_cpu_disable_ftrace(); - trap = kvmhv_p9_guest_entry(vcpu, time_limit); + trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr); vcpu->arch.trap = trap; this_cpu_enable_ftrace(); @@ -3809,8 +3952,12 @@ static int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, trace_kvm_guest_exit(vcpu); r = RESUME_GUEST; - if (trap) - r = kvmppc_handle_exit_hv(kvm_run, vcpu, current); + if (trap) { + if (!nested) + r = kvmppc_handle_exit_hv(kvm_run, vcpu, current); + else + r = kvmppc_handle_nested_exit(vcpu); + } vcpu->arch.ret = r; if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded && @@ -3925,7 +4072,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) do { if (kvm->arch.threads_indep && kvm_is_radix(kvm)) - r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0); + r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0, + vcpu->arch.vcore->lpcr); else r = kvmppc_run_vcpu(run, vcpu); @@ -4479,8 +4627,14 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) * On POWER9, we only need to do this if the "indep_threads_mode" * module parameter has been set to N. 
*/ - if (cpu_has_feature(CPU_FTR_ARCH_300)) - kvm->arch.threads_indep = indep_threads_mode; + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + if (!indep_threads_mode && !cpu_has_feature(CPU_FTR_HVMODE)) { + pr_warn("KVM: Ignoring indep_threads_mode=N in nested hypervisor\n"); + kvm->arch.threads_indep = true; + } else { + kvm->arch.threads_indep = indep_threads_mode; + } + } if (!kvm->arch.threads_indep) kvm_hv_vm_activated(); diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 327826248314..5b9fd7c8d931 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -20,6 +20,231 @@ static struct patb_entry *pseries_partition_tb; static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp); +void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) +{ + struct kvmppc_vcore *vc = vcpu->arch.vcore; + + hr->pcr = vc->pcr; + hr->dpdes = vc->dpdes; + hr->hfscr = vcpu->arch.hfscr; + hr->tb_offset = vc->tb_offset; + hr->dawr0 = vcpu->arch.dawr; + hr->dawrx0 = vcpu->arch.dawrx; + hr->ciabr = vcpu->arch.ciabr; + hr->purr = vcpu->arch.purr; + hr->spurr = vcpu->arch.spurr; + hr->ic = vcpu->arch.ic; + hr->vtb = vc->vtb; + hr->srr0 = vcpu->arch.shregs.srr0; + hr->srr1 = vcpu->arch.shregs.srr1; + hr->sprg[0] = vcpu->arch.shregs.sprg0; + hr->sprg[1] = vcpu->arch.shregs.sprg1; + hr->sprg[2] = vcpu->arch.shregs.sprg2; + hr->sprg[3] = vcpu->arch.shregs.sprg3; + hr->pidr = vcpu->arch.pid; + hr->cfar = vcpu->arch.cfar; + hr->ppr = vcpu->arch.ppr; +} + +static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap, + struct hv_guest_state *hr) +{ + struct kvmppc_vcore *vc = vcpu->arch.vcore; + + hr->dpdes = vc->dpdes; + hr->hfscr = vcpu->arch.hfscr; + hr->purr = vcpu->arch.purr; + hr->spurr = vcpu->arch.spurr; + hr->ic = vcpu->arch.ic; + hr->vtb = vc->vtb; + hr->srr0 = vcpu->arch.shregs.srr0; + hr->srr1 = vcpu->arch.shregs.srr1; + hr->sprg[0] = vcpu->arch.shregs.sprg0; + hr->sprg[1] = vcpu->arch.shregs.sprg1; + hr->sprg[2] = vcpu->arch.shregs.sprg2; + hr->sprg[3] = vcpu->arch.shregs.sprg3; + hr->pidr = vcpu->arch.pid; + hr->cfar = vcpu->arch.cfar; + hr->ppr = vcpu->arch.ppr; + switch (trap) { + case BOOK3S_INTERRUPT_H_DATA_STORAGE: + hr->hdar = vcpu->arch.fault_dar; + hr->hdsisr = vcpu->arch.fault_dsisr; + hr->asdr = vcpu->arch.fault_gpa; + break; + case BOOK3S_INTERRUPT_H_INST_STORAGE: + hr->asdr = vcpu->arch.fault_gpa; + break; + case BOOK3S_INTERRUPT_H_EMUL_ASSIST: + hr->heir = vcpu->arch.emul_inst; + break; + } +} + +static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) +{ + struct kvmppc_vcore *vc = vcpu->arch.vcore; + + vc->pcr = hr->pcr; + vc->dpdes = hr->dpdes; + vcpu->arch.hfscr = hr->hfscr; + vcpu->arch.dawr = hr->dawr0; + vcpu->arch.dawrx = hr->dawrx0; + vcpu->arch.ciabr = hr->ciabr; + vcpu->arch.purr = hr->purr; + vcpu->arch.spurr = hr->spurr; + vcpu->arch.ic = hr->ic; + vc->vtb = hr->vtb; + vcpu->arch.shregs.srr0 = hr->srr0; + vcpu->arch.shregs.srr1 = hr->srr1; + vcpu->arch.shregs.sprg0 = hr->sprg[0]; + vcpu->arch.shregs.sprg1 = hr->sprg[1]; + vcpu->arch.shregs.sprg2 = hr->sprg[2]; + vcpu->arch.shregs.sprg3 = hr->sprg[3]; + vcpu->arch.pid = hr->pidr; + vcpu->arch.cfar = hr->cfar; + vcpu->arch.ppr = hr->ppr; +} + +void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu, + struct hv_guest_state *hr) +{ + struct kvmppc_vcore *vc = vcpu->arch.vcore; + + vc->dpdes = hr->dpdes; + vcpu->arch.hfscr = hr->hfscr; + vcpu->arch.purr = hr->purr; + vcpu->arch.spurr = hr->spurr; + vcpu->arch.ic = 
hr->ic; + vc->vtb = hr->vtb; + vcpu->arch.fault_dar = hr->hdar; + vcpu->arch.fault_dsisr = hr->hdsisr; + vcpu->arch.fault_gpa = hr->asdr; + vcpu->arch.emul_inst = hr->heir; + vcpu->arch.shregs.srr0 = hr->srr0; + vcpu->arch.shregs.srr1 = hr->srr1; + vcpu->arch.shregs.sprg0 = hr->sprg[0]; + vcpu->arch.shregs.sprg1 = hr->sprg[1]; + vcpu->arch.shregs.sprg2 = hr->sprg[2]; + vcpu->arch.shregs.sprg3 = hr->sprg[3]; + vcpu->arch.pid = hr->pidr; + vcpu->arch.cfar = hr->cfar; + vcpu->arch.ppr = hr->ppr; +} + +long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) +{ + long int err, r; + struct kvm_nested_guest *l2; + struct pt_regs l2_regs, saved_l1_regs; + struct hv_guest_state l2_hv, saved_l1_hv; + struct kvmppc_vcore *vc = vcpu->arch.vcore; + u64 hv_ptr, regs_ptr; + u64 hdec_exp; + s64 delta_purr, delta_spurr, delta_ic, delta_vtb; + u64 mask; + unsigned long lpcr; + + if (vcpu->kvm->arch.l1_ptcr == 0) + return H_NOT_AVAILABLE; + + /* copy parameters in */ + hv_ptr = kvmppc_get_gpr(vcpu, 4); + err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv, + sizeof(struct hv_guest_state)); + if (err) + return H_PARAMETER; + if (l2_hv.version != HV_GUEST_STATE_VERSION) + return H_P2; + + regs_ptr = kvmppc_get_gpr(vcpu, 5); + err = kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs, + sizeof(struct pt_regs)); + if (err) + return H_PARAMETER; + + /* translate lpid */ + l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true); + if (!l2) + return H_PARAMETER; + if (!l2->l1_gr_to_hr) { + mutex_lock(&l2->tlb_lock); + kvmhv_update_ptbl_cache(l2); + mutex_unlock(&l2->tlb_lock); + } + + /* save l1 values of things */ + vcpu->arch.regs.msr = vcpu->arch.shregs.msr; + saved_l1_regs = vcpu->arch.regs; + kvmhv_save_hv_regs(vcpu, &saved_l1_hv); + + /* convert TB values/offsets to host (L0) values */ + hdec_exp = l2_hv.hdec_expiry - vc->tb_offset; + vc->tb_offset += l2_hv.tb_offset; + + /* set L1 state to L2 state */ + vcpu->arch.nested = l2; + vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token; + vcpu->arch.regs = l2_regs; + vcpu->arch.shregs.msr = vcpu->arch.regs.msr; + mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | + LPCR_LPES | LPCR_MER; + lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask); + restore_hv_regs(vcpu, &l2_hv); + + vcpu->arch.ret = RESUME_GUEST; + vcpu->arch.trap = 0; + do { + if (mftb() >= hdec_exp) { + vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; + r = RESUME_HOST; + break; + } + r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp, + lpcr); + } while (is_kvmppc_resume_guest(r)); + + /* save L2 state for return */ + l2_regs = vcpu->arch.regs; + l2_regs.msr = vcpu->arch.shregs.msr; + delta_purr = vcpu->arch.purr - l2_hv.purr; + delta_spurr = vcpu->arch.spurr - l2_hv.spurr; + delta_ic = vcpu->arch.ic - l2_hv.ic; + delta_vtb = vc->vtb - l2_hv.vtb; + save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv); + + /* restore L1 state */ + vcpu->arch.nested = NULL; + vcpu->arch.regs = saved_l1_regs; + vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK; + /* set L1 MSR TS field according to L2 transaction state */ + if (l2_regs.msr & MSR_TS_MASK) + vcpu->arch.shregs.msr |= MSR_TS_S; + vc->tb_offset = saved_l1_hv.tb_offset; + restore_hv_regs(vcpu, &saved_l1_hv); + vcpu->arch.purr += delta_purr; + vcpu->arch.spurr += delta_spurr; + vcpu->arch.ic += delta_ic; + vc->vtb += delta_vtb; + + kvmhv_put_nested(l2); + + /* copy l2_hv_state and regs back to guest */ + err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv, + sizeof(struct hv_guest_state)); + if (err) + return H_AUTHORITY; + err = kvm_vcpu_write_guest(vcpu, regs_ptr, 
&l2_regs, + sizeof(struct pt_regs)); + if (err) + return H_AUTHORITY; + + if (r == -EINTR) + return H_INTERRUPT; + + return vcpu->arch.trap; +} + long kvmhv_nested_init(void) { long int ptb_order; @@ -299,3 +524,8 @@ void kvmhv_put_nested(struct kvm_nested_guest *gp) if (ref == 0) kvmhv_release_nested(gp); } + +long kvmhv_nested_page_fault(struct kvm_vcpu *vcpu) +{ + return RESUME_HOST; +} diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 9cea23866378..a945369f194c 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1393,6 +1393,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) BEGIN_FTR_SECTION PPC_MSGSYNC lwsync + /* always exit if we're running a nested guest */ + ld r0, VCPU_NESTED(r9) + cmpdi r0, 0 + bne guest_exit_cont END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) lbz r0, HSTATE_HOST_IPI(r13) cmpwi r0, 0 @@ -2199,6 +2203,10 @@ hcall_try_real_mode: andi. r0,r11,MSR_PR /* sc 1 from userspace - reflect to guest syscall */ bne sc_1_fast_return + /* sc 1 from nested guest - give it to L1 to handle */ + ld r0, VCPU_NESTED(r9) + cmpdi r0, 0 + bne guest_exit_cont clrrdi r3,r3,2 cmpldi r3,hcall_real_table_end - hcall_real_table bge guest_exit_cont From f3c18e9342a443528137a303f3c391d42d3bb394 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:31:05 +1100 Subject: [PATCH 080/204] KVM: PPC: Book3S HV: Use XICS hypercalls when running as a nested hypervisor This adds code to call the H_IPI and H_EOI hypercalls when we are running as a nested hypervisor (i.e. without the CPU_FTR_HVMODE cpu feature) and we would otherwise access the XICS interrupt controller directly or via an OPAL call. Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_hv.c | 7 ++++- arch/powerpc/kvm/book3s_hv_builtin.c | 44 +++++++++++++++++++++++----- arch/powerpc/kvm/book3s_hv_rm_xics.c | 8 +++++ 3 files changed, 50 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 7a2f344af4f8..e8e56c188dfc 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -173,6 +173,10 @@ static bool kvmppc_ipi_thread(int cpu) { unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); + /* If we're a nested hypervisor, fall back to ordinary IPIs for now */ + if (kvmhv_on_pseries()) + return false; + /* On POWER9 we can use msgsnd to IPI any cpu */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { msg |= get_hard_smp_processor_id(cpu); @@ -5177,7 +5181,8 @@ static int kvmppc_book3s_init_hv(void) * indirectly, via OPAL. */ #ifdef CONFIG_SMP - if (!xive_enabled() && !local_paca->kvm_hstate.xics_phys) { + if (!xive_enabled() && !kvmhv_on_pseries() && + !local_paca->kvm_hstate.xics_phys) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc"); diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index ccfea5b96d89..a71e2fc00a4e 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -231,6 +231,15 @@ void kvmhv_rm_send_ipi(int cpu) void __iomem *xics_phys; unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); + /* For a nested hypervisor, use the XICS via hcall */ + if (kvmhv_on_pseries()) { + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu), + IPI_PRIORITY); + return; + } + /* On POWER9 we can use msgsnd for any destination cpu. 
*/ if (cpu_has_feature(CPU_FTR_ARCH_300)) { msg |= get_hard_smp_processor_id(cpu); @@ -460,12 +469,19 @@ static long kvmppc_read_one_intr(bool *again) return 1; /* Now read the interrupt from the ICP */ - xics_phys = local_paca->kvm_hstate.xics_phys; - rc = 0; - if (!xics_phys) - rc = opal_int_get_xirr(&xirr, false); - else - xirr = __raw_rm_readl(xics_phys + XICS_XIRR); + if (kvmhv_on_pseries()) { + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF); + xirr = cpu_to_be32(retbuf[0]); + } else { + xics_phys = local_paca->kvm_hstate.xics_phys; + rc = 0; + if (!xics_phys) + rc = opal_int_get_xirr(&xirr, false); + else + xirr = __raw_rm_readl(xics_phys + XICS_XIRR); + } if (rc < 0) return 1; @@ -494,7 +510,13 @@ static long kvmppc_read_one_intr(bool *again) */ if (xisr == XICS_IPI) { rc = 0; - if (xics_phys) { + if (kvmhv_on_pseries()) { + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + plpar_hcall_raw(H_IPI, retbuf, + hard_smp_processor_id(), 0xff); + plpar_hcall_raw(H_EOI, retbuf, h_xirr); + } else if (xics_phys) { __raw_rm_writeb(0xff, xics_phys + XICS_MFRR); __raw_rm_writel(xirr, xics_phys + XICS_XIRR); } else { @@ -520,7 +542,13 @@ static long kvmppc_read_one_intr(bool *again) /* We raced with the host, * we need to resend that IPI, bummer */ - if (xics_phys) + if (kvmhv_on_pseries()) { + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + plpar_hcall_raw(H_IPI, retbuf, + hard_smp_processor_id(), + IPI_PRIORITY); + } else if (xics_phys) __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR); else diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 8b9f35689648..b3f5786b20dc 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -767,6 +767,14 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again) void __iomem *xics_phys; int64_t rc; + if (kvmhv_on_pseries()) { + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + iosync(); + plpar_hcall_raw(H_EOI, retbuf, hwirq); + return; + } + rc = pnv_opal_pci_msi_eoi(c, hwirq); if (rc) From 4bad77799fede9d95abaf499fff385608ee14877 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:31:06 +1100 Subject: [PATCH 081/204] KVM: PPC: Book3S HV: Handle hypercalls correctly when nested When we are running as a nested hypervisor, we use a hypercall to enter the guest rather than code in book3s_hv_rmhandlers.S. This means that the hypercall handlers listed in hcall_real_table never get called. There are some hypercalls that are handled there and not in kvmppc_pseries_do_hcall(), which therefore won't get processed for a nested guest. To fix this, we add cases to kvmppc_pseries_do_hcall() to handle those hypercalls, with the following exceptions: - The HPT hypercalls (H_ENTER, H_REMOVE, etc.) are not handled because we only support radix mode for nested guests. - H_CEDE has to be handled specially because the cede logic in kvmhv_run_single_vcpu assumes that it has been processed by the time that kvmhv_p9_guest_entry() returns. Therefore we put a special case for H_CEDE in kvmhv_p9_guest_entry(). For the XICS hypercalls, if real-mode processing is enabled, then the virtual-mode handlers assume that they are being called only to finish up the operation. Therefore we turn off the real-mode flag in the XICS code when running as a nested hypervisor. 
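
[Editor's note: as a rough illustration of the routing change described above, the standalone sketch below models how an hcall reaches the virtual-mode handler when the real-mode table is bypassed in the nested case, and why H_CEDE has to be picked off right after the guest exits rather than in the generic handler. It is not kernel code; the hcall numbers, the toy_* names and the vcpu fields are simplified stand-ins.]

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the real hcall numbers and vcpu state. */
enum hcall { H_CEDE = 0xE0, H_SET_DABR = 0x28, H_RANDOM = 0x300 };

struct toy_vcpu {
	bool nested;   /* running under an L0 hypervisor (no HV mode)    */
	bool ceded;    /* set by the cede logic, consumed by the run loop */
	bool prodded;  /* an H_PROD arrived while we were trying to cede  */
};

/* Mirrors the shape of the early cede handling: cede must be resolved
 * here, before the run loop looks at vcpu->ceded, not later in the
 * generic virtual-mode hcall handler. */
static void toy_nested_cede(struct toy_vcpu *vcpu)
{
	vcpu->ceded = true;
	if (vcpu->prodded) {
		vcpu->prodded = false;
		vcpu->ceded = false;
	}
}

/* Generic virtual-mode handler: with no real-mode table in the nested
 * case, every remaining hcall lands here. */
static const char *toy_do_hcall(enum hcall nr)
{
	switch (nr) {
	case H_SET_DABR: return "handled in virtual mode";
	case H_RANDOM:   return "handled in virtual mode";
	default:         return "reflected to the guest or host";
	}
}

/* Models the post-exit path when running as a nested hypervisor. */
static const char *toy_handle_guest_exit(struct toy_vcpu *vcpu, enum hcall nr)
{
	if (vcpu->nested && nr == H_CEDE) {
		toy_nested_cede(vcpu);
		return "cede resolved early";
	}
	return toy_do_hcall(nr);
}

int main(void)
{
	struct toy_vcpu vcpu = { .nested = true };

	printf("%s\n", toy_handle_guest_exit(&vcpu, H_CEDE));
	printf("%s\n", toy_handle_guest_exit(&vcpu, H_SET_DABR));
	return 0;
}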
Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/asm-prototypes.h | 4 +++ arch/powerpc/kvm/book3s_hv.c | 43 +++++++++++++++++++++++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2 ++ arch/powerpc/kvm/book3s_xics.c | 3 +- 4 files changed, 51 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 5c9b00cec4f2..c55ba3b4873b 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -167,4 +167,8 @@ void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu); int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu); +long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr); +long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr, + unsigned long dabrx); + #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index e8e56c188dfc..a87912508f63 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include #include @@ -915,6 +916,19 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) break; } return RESUME_HOST; + case H_SET_DABR: + ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4)); + break; + case H_SET_XDABR: + ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5)); + break; + case H_GET_TCE: + ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5)); + if (ret == H_TOO_HARD) + return RESUME_HOST; + break; case H_PUT_TCE: ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), @@ -938,6 +952,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) if (ret == H_TOO_HARD) return RESUME_HOST; break; + case H_RANDOM: + if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4])) + ret = H_HARDWARE; + break; case H_SET_PARTITION_TABLE: ret = H_FUNCTION; @@ -966,6 +984,24 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +/* + * Handle H_CEDE in the nested virtualization case where we haven't + * called the real-mode hcall handlers in book3s_hv_rmhandlers.S. + * This has to be done early, not in kvmppc_pseries_do_hcall(), so + * that the cede logic in kvmppc_run_single_vcpu() works properly. + */ +static void kvmppc_nested_cede(struct kvm_vcpu *vcpu) +{ + vcpu->arch.shregs.msr |= MSR_EE; + vcpu->arch.ceded = 1; + smp_mb(); + if (vcpu->arch.prodded) { + vcpu->arch.prodded = 0; + smp_mb(); + vcpu->arch.ceded = 0; + } +} + static int kvmppc_hcall_impl_hv(unsigned long cmd) { switch (cmd) { @@ -3424,6 +3460,13 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, vcpu->arch.shregs.msr = vcpu->arch.regs.msr; vcpu->arch.shregs.dar = mfspr(SPRN_DAR); vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); + + /* H_CEDE has to be handled now, not later */ + if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && + kvmppc_get_gpr(vcpu, 3) == H_CEDE) { + kvmppc_nested_cede(vcpu); + trap = 0; + } } else { trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr); } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index a945369f194c..9b8d50a7cbaf 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -2466,6 +2466,7 @@ hcall_real_table: hcall_real_table_end: _GLOBAL(kvmppc_h_set_xdabr) +EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) andi. 
r0, r5, DABRX_USER | DABRX_KERNEL beq 6f li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI @@ -2475,6 +2476,7 @@ _GLOBAL(kvmppc_h_set_xdabr) blr _GLOBAL(kvmppc_h_set_dabr) +EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr) li r5, DABRX_USER | DABRX_KERNEL 3: BEGIN_FTR_SECTION diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index d9ba1b06d0f5..b0b2bfc2ff51 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -1390,7 +1390,8 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type) } #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - if (cpu_has_feature(CPU_FTR_ARCH_206)) { + if (cpu_has_feature(CPU_FTR_ARCH_206) && + cpu_has_feature(CPU_FTR_HVMODE)) { /* Enable real mode support */ xics->real_mode = ENABLE_REALMODE; xics->real_mode_dbg = DEBUG_REALMODE; From fd10be257312b5d883f89d62d691443e95678fdd Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Mon, 8 Oct 2018 16:31:07 +1100 Subject: [PATCH 082/204] KVM: PPC: Book3S HV: Handle page fault for a nested guest Consider a normal (L1) guest running under the main hypervisor (L0), and then a nested guest (L2) running under the L1 guest which is acting as a nested hypervisor. L0 has page tables to map the address space for L1 providing the translation from L1 real address -> L0 real address; L1 | | (L1 -> L0) | ----> L0 There are also page tables in L1 used to map the address space for L2 providing the translation from L2 real address -> L1 read address. Since the hardware can only walk a single level of page table, we need to maintain in L0 a "shadow_pgtable" for L2 which provides the translation from L2 real address -> L0 real address. Which looks like; L2 L2 | | | (L2 -> L1) | | | ----> L1 | (L2 -> L0) | | | (L1 -> L0) | | | ----> L0 --------> L0 When a page fault occurs while running a nested (L2) guest we need to insert a pte into this "shadow_pgtable" for the L2 -> L0 mapping. To do this we need to: 1. Walk the pgtable in L1 memory to find the L2 -> L1 mapping, and provide a page fault to L1 if this mapping doesn't exist. 2. Use our L1 -> L0 pgtable to convert this L1 address to an L0 address, or try to insert a pte for that mapping if it doesn't exist. 3. Now we have a L2 -> L0 mapping, insert this into our shadow_pgtable Once this mapping exists we can take rc faults when hardware is unable to automatically set the reference and change bits in the pte. On these we need to: 1. Check the rc bits on the L2 -> L1 pte match, and otherwise reflect the fault down to L1. 2. Set the rc bits in the L1 -> L0 pte which corresponds to the same host page. 3. Set the rc bits in the L2 -> L0 pte. As we reuse a large number of functions in book3s_64_mmu_radix.c for this we also needed to refactor a number of these functions to take an lpid parameter so that the correct lpid is used for tlb invalidations. The functionality however has remained the same. 
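
[Editor's note: the core of the fault path described above is the composition of two translations and the intersection of their attributes. The standalone sketch below is a simplified model of that step only -- resolve L2 -> L1, resolve L1 -> L0, take the weaker of the two permission sets and the smaller of the two page sizes for the resulting L2 -> L0 shadow entry. The struct and field names are illustrative, not the kernel's, and the sketch ignores locking, rc bits and memslot handling.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative translation result; loosely modelled on a radix pte walk. */
struct toy_pte {
	uint64_t raddr;        /* translated real address                 */
	unsigned int shift;    /* page size as a shift (12 = 4K, 21 = 2M) */
	bool may_read, may_write, may_execute;
};

/*
 * Compose an L2 -> L1 translation with an L1 -> L0 translation into the
 * L2 -> L0 entry that would go into the shadow page table.  Permissions
 * are intersected; the page size is the smaller of the two; the low-order
 * bits of the L1 address select the sub-page within the host mapping.
 */
static struct toy_pte compose_shadow_pte(struct toy_pte l2_to_l1,
					 struct toy_pte l1_to_l0)
{
	struct toy_pte out;
	unsigned int shift = l2_to_l1.shift < l1_to_l0.shift ?
			     l2_to_l1.shift : l1_to_l0.shift;
	uint64_t sub = l2_to_l1.raddr & ((1ULL << l1_to_l0.shift) - 1);

	out.shift = shift;
	out.raddr = (l1_to_l0.raddr & ~((1ULL << l1_to_l0.shift) - 1)) | sub;
	out.raddr &= ~((1ULL << shift) - 1);
	out.may_read = l2_to_l1.may_read && l1_to_l0.may_read;
	out.may_write = l2_to_l1.may_write && l1_to_l0.may_write;
	out.may_execute = l2_to_l1.may_execute && l1_to_l0.may_execute;
	return out;
}

int main(void)
{
	/* 2M L2 -> L1 mapping backed by a 4K, write-protected host page. */
	struct toy_pte l2_l1 = { 0x40201000, 21, true, true, false };
	struct toy_pte l1_l0 = { 0x1234000, 12, true, false, false };
	struct toy_pte s = compose_shadow_pte(l2_l1, l1_l0);

	printf("L2->L0: raddr=0x%llx shift=%u rwx=%d%d%d\n",
	       (unsigned long long)s.raddr, s.shift,
	       s.may_read, s.may_write, s.may_execute);
	return 0;
}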
Reviewed-by: David Gibson Signed-off-by: Suraj Jitindar Singh Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- .../include/asm/book3s/64/tlbflush-radix.h | 1 + arch/powerpc/include/asm/kvm_book3s.h | 17 + arch/powerpc/include/asm/kvm_book3s_64.h | 4 + arch/powerpc/include/asm/kvm_host.h | 2 + arch/powerpc/kvm/book3s_64_mmu_radix.c | 194 +++++----- arch/powerpc/kvm/book3s_hv_nested.c | 332 +++++++++++++++++- arch/powerpc/mm/tlb-radix.c | 9 + 7 files changed, 473 insertions(+), 86 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index 1154a6dc6d26..671316f9e95d 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -53,6 +53,7 @@ extern void radix__flush_tlb_lpid_page(unsigned int lpid, unsigned long addr, unsigned long page_size); extern void radix__flush_pwc_lpid(unsigned int lpid); +extern void radix__flush_tlb_lpid(unsigned int lpid); extern void radix__local_flush_tlb_lpid(unsigned int lpid); extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid); diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 093fd700da32..63f7ccfac174 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -188,17 +188,34 @@ extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc); extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned long ea, unsigned long dsisr); +extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr, + struct kvmppc_pte *gpte, u64 root, + u64 *pte_ret_p); extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, u64 table, int table_index, u64 *pte_ret_p); extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite); +extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, + bool writing, unsigned long gpa, + unsigned int lpid); +extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, + unsigned long gpa, + struct kvm_memory_slot *memslot, + bool writing, bool kvm_ro, + pte_t *inserted_pte, unsigned int *levelp); extern int kvmppc_init_vm_radix(struct kvm *kvm); extern void kvmppc_free_radix(struct kvm *kvm); +extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, + unsigned int lpid); extern int kvmppc_radix_init(void); extern void kvmppc_radix_exit(void); extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn); +extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, + unsigned long gpa, unsigned int shift, + struct kvm_memory_slot *memslot, + unsigned int lpid); extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn); extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 6d67b6a9e784..5496152f70e1 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -549,6 +549,10 @@ static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu) } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ +extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, + unsigned long gpa, unsigned int level, + unsigned long mmu_seq, unsigned int lpid); + #endif /* 
CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index ceb9f20a0b24..fac6f631ed29 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -367,7 +367,9 @@ struct kvmppc_pte { bool may_write : 1; bool may_execute : 1; unsigned long wimg; + unsigned long rc; u8 page_size; /* MMU_PAGE_xxx */ + u8 page_shift; }; struct kvmppc_mmu { diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index bd06a955d190..c4b1a9e1e3ff 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -29,43 +29,16 @@ */ static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 }; -/* - * Used to walk a partition or process table radix tree in guest memory - * Note: We exploit the fact that a partition table and a process - * table have the same layout, a partition-scoped page table and a - * process-scoped page table have the same layout, and the 2nd - * doubleword of a partition table entry has the same layout as - * the PTCR register. - */ -int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, - struct kvmppc_pte *gpte, u64 table, - int table_index, u64 *pte_ret_p) +int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr, + struct kvmppc_pte *gpte, u64 root, + u64 *pte_ret_p) { struct kvm *kvm = vcpu->kvm; int ret, level, ps; - unsigned long ptbl, root; - unsigned long rts, bits, offset; - unsigned long size, index; - struct prtb_entry entry; + unsigned long rts, bits, offset, index; u64 pte, base, gpa; __be64 rpte; - if ((table & PRTS_MASK) > 24) - return -EINVAL; - size = 1ul << ((table & PRTS_MASK) + 12); - - /* Is the table big enough to contain this entry? 
*/ - if ((table_index * sizeof(entry)) >= size) - return -EINVAL; - - /* Read the table to find the root of the radix tree */ - ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry)); - ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry)); - if (ret) - return ret; - - /* Root is stored in the first double word */ - root = be64_to_cpu(entry.prtb0); rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) | ((root & RTS2_MASK) >> RTS2_SHIFT); bits = root & RPDS_MASK; @@ -79,6 +52,7 @@ int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, /* Walk each level of the radix tree */ for (level = 3; level >= 0; --level) { + u64 addr; /* Check a valid size */ if (level && bits != p9_supported_radix_bits[level]) return -EINVAL; @@ -90,10 +64,13 @@ int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, if (base & ((1UL << (bits + 3)) - 1)) return -EINVAL; /* Read the entry from guest memory */ - ret = kvm_read_guest(kvm, base + (index * sizeof(rpte)), - &rpte, sizeof(rpte)); - if (ret) + addr = base + (index * sizeof(rpte)); + ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte)); + if (ret) { + if (pte_ret_p) + *pte_ret_p = addr; return ret; + } pte = __be64_to_cpu(rpte); if (!(pte & _PAGE_PRESENT)) return -ENOENT; @@ -119,6 +96,7 @@ int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, if (offset == mmu_psize_defs[ps].shift) break; gpte->page_size = ps; + gpte->page_shift = offset; gpte->eaddr = eaddr; gpte->raddr = gpa; @@ -128,12 +106,51 @@ int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, gpte->may_write = !!(pte & _PAGE_WRITE); gpte->may_execute = !!(pte & _PAGE_EXEC); + gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY); + if (pte_ret_p) *pte_ret_p = pte; return 0; } +/* + * Used to walk a partition or process table radix tree in guest memory + * Note: We exploit the fact that a partition table and a process + * table have the same layout, a partition-scoped page table and a + * process-scoped page table have the same layout, and the 2nd + * doubleword of a partition table entry has the same layout as + * the PTCR register. + */ +int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, + struct kvmppc_pte *gpte, u64 table, + int table_index, u64 *pte_ret_p) +{ + struct kvm *kvm = vcpu->kvm; + int ret; + unsigned long size, ptbl, root; + struct prtb_entry entry; + + if ((table & PRTS_MASK) > 24) + return -EINVAL; + size = 1ul << ((table & PRTS_MASK) + 12); + + /* Is the table big enough to contain this entry? 
*/ + if ((table_index * sizeof(entry)) >= size) + return -EINVAL; + + /* Read the table to find the root of the radix tree */ + ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry)); + ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry)); + if (ret) + return ret; + + /* Root is stored in the first double word */ + root = be64_to_cpu(entry.prtb0); + + return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p); +} + int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite) { @@ -181,7 +198,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, } static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, - unsigned int pshift) + unsigned int pshift, unsigned int lpid) { unsigned long psize = PAGE_SIZE; @@ -189,12 +206,12 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, psize = 1UL << pshift; addr &= ~(psize - 1); - radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize); + radix__flush_tlb_lpid_page(lpid, addr, psize); } -static void kvmppc_radix_flush_pwc(struct kvm *kvm) +static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid) { - radix__flush_pwc_lpid(kvm->arch.lpid); + radix__flush_pwc_lpid(lpid); } static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, @@ -239,16 +256,17 @@ static void kvmppc_pmd_free(pmd_t *pmdp) kmem_cache_free(kvm_pmd_cache, pmdp); } -static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, - unsigned long gpa, unsigned int shift, - struct kvm_memory_slot *memslot) +void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, + unsigned long gpa, unsigned int shift, + struct kvm_memory_slot *memslot, + unsigned int lpid) { unsigned long old; old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift); - kvmppc_radix_tlbie_page(kvm, gpa, shift); - if (old & _PAGE_DIRTY) { + kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid); + if ((old & _PAGE_DIRTY) && (lpid == kvm->arch.lpid)) { unsigned long gfn = gpa >> PAGE_SHIFT; unsigned long page_size = PAGE_SIZE; @@ -271,7 +289,8 @@ static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, * and emit a warning if encountered, but there may already be data * corruption due to the unexpected mappings. 
*/ -static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full) +static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full, + unsigned int lpid) { if (full) { memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE); @@ -285,14 +304,15 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full) WARN_ON_ONCE(1); kvmppc_unmap_pte(kvm, p, pte_pfn(*p) << PAGE_SHIFT, - PAGE_SHIFT, NULL); + PAGE_SHIFT, NULL, lpid); } } kvmppc_pte_free(pte); } -static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full) +static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full, + unsigned int lpid) { unsigned long im; pmd_t *p = pmd; @@ -307,20 +327,21 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full) WARN_ON_ONCE(1); kvmppc_unmap_pte(kvm, (pte_t *)p, pte_pfn(*(pte_t *)p) << PAGE_SHIFT, - PMD_SHIFT, NULL); + PMD_SHIFT, NULL, lpid); } } else { pte_t *pte; pte = pte_offset_map(p, 0); - kvmppc_unmap_free_pte(kvm, pte, full); + kvmppc_unmap_free_pte(kvm, pte, full, lpid); pmd_clear(p); } } kvmppc_pmd_free(pmd); } -static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud) +static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud, + unsigned int lpid) { unsigned long iu; pud_t *p = pud; @@ -334,36 +355,40 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud) pmd_t *pmd; pmd = pmd_offset(p, 0); - kvmppc_unmap_free_pmd(kvm, pmd, true); + kvmppc_unmap_free_pmd(kvm, pmd, true, lpid); pud_clear(p); } } pud_free(kvm->mm, pud); } -void kvmppc_free_radix(struct kvm *kvm) +void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid) { unsigned long ig; - pgd_t *pgd; - if (!kvm->arch.pgtable) - return; - pgd = kvm->arch.pgtable; for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) { pud_t *pud; if (!pgd_present(*pgd)) continue; pud = pud_offset(pgd, 0); - kvmppc_unmap_free_pud(kvm, pud); + kvmppc_unmap_free_pud(kvm, pud, lpid); pgd_clear(pgd); } - pgd_free(kvm->mm, kvm->arch.pgtable); - kvm->arch.pgtable = NULL; +} + +void kvmppc_free_radix(struct kvm *kvm) +{ + if (kvm->arch.pgtable) { + kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable, + kvm->arch.lpid); + pgd_free(kvm->mm, kvm->arch.pgtable); + kvm->arch.pgtable = NULL; + } } static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, - unsigned long gpa) + unsigned long gpa, unsigned int lpid) { pte_t *pte = pte_offset_kernel(pmd, 0); @@ -373,13 +398,13 @@ static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, * flushing the PWC again. */ pmd_clear(pmd); - kvmppc_radix_flush_pwc(kvm); + kvmppc_radix_flush_pwc(kvm, lpid); - kvmppc_unmap_free_pte(kvm, pte, false); + kvmppc_unmap_free_pte(kvm, pte, false, lpid); } static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, - unsigned long gpa) + unsigned long gpa, unsigned int lpid) { pmd_t *pmd = pmd_offset(pud, 0); @@ -389,9 +414,9 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, * so can be freed without flushing the PWC again. 
*/ pud_clear(pud); - kvmppc_radix_flush_pwc(kvm); + kvmppc_radix_flush_pwc(kvm, lpid); - kvmppc_unmap_free_pmd(kvm, pmd, false); + kvmppc_unmap_free_pmd(kvm, pmd, false, lpid); } /* @@ -403,9 +428,9 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, */ #define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)) -static int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, - unsigned long gpa, unsigned int level, - unsigned long mmu_seq) +int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, + unsigned long gpa, unsigned int level, + unsigned long mmu_seq, unsigned int lpid) { pgd_t *pgd; pud_t *pud, *new_pud = NULL; @@ -471,7 +496,8 @@ static int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, goto out_unlock; } /* Valid 1GB page here already, remove it */ - kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL); + kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL, + lpid); } if (level == 2) { if (!pud_none(*pud)) { @@ -480,7 +506,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, * install a large page, so remove and free the page * table page. */ - kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa); + kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid); } kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte); ret = 0; @@ -506,7 +532,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) & PTE_BITS_MUST_MATCH); kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd), - 0, pte_val(pte), lgpa, PMD_SHIFT); + 0, pte_val(pte), lgpa, PMD_SHIFT); ret = 0; goto out_unlock; } @@ -520,7 +546,8 @@ static int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, goto out_unlock; } /* Valid 2MB page here already, remove it */ - kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL); + kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL, + lpid); } if (level == 1) { if (!pmd_none(*pmd)) { @@ -529,7 +556,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, * install a large page, so remove and free the page * table page. 
*/ - kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa); + kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid); } kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte); ret = 0; @@ -569,8 +596,8 @@ static int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, return ret; } -static bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, - bool writing, unsigned long gpa) +bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing, + unsigned long gpa, unsigned int lpid) { unsigned long pgflags; unsigned int shift; @@ -597,11 +624,11 @@ static bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, return false; } -static int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, - unsigned long gpa, - struct kvm_memory_slot *memslot, - bool writing, bool kvm_ro, - pte_t *inserted_pte, unsigned int *levelp) +int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, + unsigned long gpa, + struct kvm_memory_slot *memslot, + bool writing, bool kvm_ro, + pte_t *inserted_pte, unsigned int *levelp) { struct kvm *kvm = vcpu->kvm; struct page *page = NULL; @@ -683,7 +710,7 @@ static int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, /* Allocate space in the tree and write the PTE */ ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level, - mmu_seq); + mmu_seq, kvm->arch.lpid); if (inserted_pte) *inserted_pte = pte; if (levelp) @@ -758,7 +785,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, if (dsisr & DSISR_SET_RC) { spin_lock(&kvm->mmu_lock); if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, - writing, gpa)) + writing, gpa, kvm->arch.lpid)) dsisr &= ~DSISR_SET_RC; spin_unlock(&kvm->mmu_lock); @@ -786,7 +813,8 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); if (ptep && pte_present(*ptep)) - kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot); + kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, + kvm->arch.lpid); return 0; } @@ -841,7 +869,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm, ret = 1 << (shift - PAGE_SHIFT); kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0, gpa, shift); - kvmppc_radix_tlbie_page(kvm, gpa, shift); + kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid); } return ret; } diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 5b9fd7c8d931..21a210c134af 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -12,9 +12,12 @@ #include #include +#include #include #include #include +#include +#include static struct patb_entry *pseries_partition_tb; @@ -403,10 +406,20 @@ struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid) */ static void kvmhv_release_nested(struct kvm_nested_guest *gp) { + struct kvm *kvm = gp->l1_host; + + if (gp->shadow_pgtable) { + /* + * No vcpu is using this struct and no call to + * kvmhv_get_nested can find this struct, + * so we don't need to hold kvm->mmu_lock. 
+ */ + kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, + gp->shadow_lpid); + pgd_free(kvm->mm, gp->shadow_pgtable); + } kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0); kvmppc_free_lpid(gp->shadow_lpid); - if (gp->shadow_pgtable) - pgd_free(gp->l1_host->mm, gp->shadow_pgtable); kfree(gp); } @@ -466,6 +479,12 @@ void kvmhv_release_all_nested(struct kvm *kvm) /* caller must hold gp->tlb_lock */ void kvmhv_flush_nested(struct kvm_nested_guest *gp) { + struct kvm *kvm = gp->l1_host; + + spin_lock(&kvm->mmu_lock); + kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid); + spin_unlock(&kvm->mmu_lock); + radix__flush_tlb_lpid(gp->shadow_lpid); kvmhv_update_ptbl_cache(gp); if (gp->l1_gr_to_hr == 0) kvmhv_remove_nested(gp); @@ -525,7 +544,314 @@ void kvmhv_put_nested(struct kvm_nested_guest *gp) kvmhv_release_nested(gp); } -long kvmhv_nested_page_fault(struct kvm_vcpu *vcpu) +static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu, + struct kvm_nested_guest *gp, + long gpa, int *shift_ret) { + struct kvm *kvm = vcpu->kvm; + bool ret = false; + pte_t *ptep; + int shift; + + spin_lock(&kvm->mmu_lock); + ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift); + if (!shift) + shift = PAGE_SHIFT; + if (ptep && pte_present(*ptep)) { + kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid); + ret = true; + } + spin_unlock(&kvm->mmu_lock); + + if (shift_ret) + *shift_ret = shift; + return ret; +} + +/* Used to convert a nested guest real address to a L1 guest real address */ +static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu, + struct kvm_nested_guest *gp, + unsigned long n_gpa, unsigned long dsisr, + struct kvmppc_pte *gpte_p) +{ + u64 fault_addr, flags = dsisr & DSISR_ISSTORE; + int ret; + + ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr, + &fault_addr); + + if (ret) { + /* We didn't find a pte */ + if (ret == -EINVAL) { + /* Unsupported mmu config */ + flags |= DSISR_UNSUPP_MMU; + } else if (ret == -ENOENT) { + /* No translation found */ + flags |= DSISR_NOHPTE; + } else if (ret == -EFAULT) { + /* Couldn't access L1 real address */ + flags |= DSISR_PRTABLE_FAULT; + vcpu->arch.fault_gpa = fault_addr; + } else { + /* Unknown error */ + return ret; + } + goto forward_to_l1; + } else { + /* We found a pte -> check permissions */ + if (dsisr & DSISR_ISSTORE) { + /* Can we write? */ + if (!gpte_p->may_write) { + flags |= DSISR_PROTFAULT; + goto forward_to_l1; + } + } else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) { + /* Can we execute? */ + if (!gpte_p->may_execute) { + flags |= SRR1_ISI_N_OR_G; + goto forward_to_l1; + } + } else { + /* Can we read? */ + if (!gpte_p->may_read && !gpte_p->may_write) { + flags |= DSISR_PROTFAULT; + goto forward_to_l1; + } + } + } + + return 0; + +forward_to_l1: + vcpu->arch.fault_dsisr = flags; + if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) { + vcpu->arch.shregs.msr &= ~0x783f0000ul; + vcpu->arch.shregs.msr |= flags; + } return RESUME_HOST; } + +static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu, + struct kvm_nested_guest *gp, + unsigned long n_gpa, + struct kvmppc_pte gpte, + unsigned long dsisr) +{ + struct kvm *kvm = vcpu->kvm; + bool writing = !!(dsisr & DSISR_ISSTORE); + u64 pgflags; + bool ret; + + /* Are the rc bits set in the L1 partition scoped pte? 
*/ + pgflags = _PAGE_ACCESSED; + if (writing) + pgflags |= _PAGE_DIRTY; + if (pgflags & ~gpte.rc) + return RESUME_HOST; + + spin_lock(&kvm->mmu_lock); + /* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */ + ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing, + gpte.raddr, kvm->arch.lpid); + spin_unlock(&kvm->mmu_lock); + if (!ret) + return -EINVAL; + + /* Set the rc bit in the pte of the shadow_pgtable for the nest guest */ + ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing, n_gpa, + gp->shadow_lpid); + if (!ret) + return -EINVAL; + return 0; +} + +static inline int kvmppc_radix_level_to_shift(int level) +{ + switch (level) { + case 2: + return PUD_SHIFT; + case 1: + return PMD_SHIFT; + default: + return PAGE_SHIFT; + } +} + +static inline int kvmppc_radix_shift_to_level(int shift) +{ + if (shift == PUD_SHIFT) + return 2; + if (shift == PMD_SHIFT) + return 1; + if (shift == PAGE_SHIFT) + return 0; + WARN_ON_ONCE(1); + return 0; +} + +/* called with gp->tlb_lock held */ +static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, + struct kvm_nested_guest *gp) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_memory_slot *memslot; + struct kvmppc_pte gpte; + pte_t pte, *pte_p; + unsigned long mmu_seq; + unsigned long dsisr = vcpu->arch.fault_dsisr; + unsigned long ea = vcpu->arch.fault_dar; + unsigned long n_gpa, gpa, gfn, perm = 0UL; + unsigned int shift, l1_shift, level; + bool writing = !!(dsisr & DSISR_ISSTORE); + bool kvm_ro = false; + long int ret; + + if (!gp->l1_gr_to_hr) { + kvmhv_update_ptbl_cache(gp); + if (!gp->l1_gr_to_hr) + return RESUME_HOST; + } + + /* Convert the nested guest real address into a L1 guest real address */ + + n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL; + if (!(dsisr & DSISR_PRTABLE_FAULT)) + n_gpa |= ea & 0xFFF; + ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte); + + /* + * If the hardware found a translation but we don't now have a usable + * translation in the l1 partition-scoped tree, remove the shadow pte + * and let the guest retry. + */ + if (ret == RESUME_HOST && + (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G | + DSISR_BAD_COPYPASTE))) + goto inval; + if (ret) + return ret; + + /* Failed to set the reference/change bits */ + if (dsisr & DSISR_SET_RC) { + ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr); + if (ret == RESUME_HOST) + return ret; + if (ret) + goto inval; + dsisr &= ~DSISR_SET_RC; + if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE | + DSISR_PROTFAULT))) + return RESUME_GUEST; + } + + /* + * We took an HISI or HDSI while we were running a nested guest which + * means we have no partition scoped translation for that. This means + * we need to insert a pte for the mapping into our shadow_pgtable. + */ + + l1_shift = gpte.page_shift; + if (l1_shift < PAGE_SHIFT) { + /* We don't support l1 using a page size smaller than our own */ + pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n", + l1_shift, PAGE_SHIFT); + return -EINVAL; + } + gpa = gpte.raddr; + gfn = gpa >> PAGE_SHIFT; + + /* 1. Get the corresponding host memslot */ + + memslot = gfn_to_memslot(kvm, gfn); + if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { + if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) { + /* unusual error -> reflect to the guest as a DSI */ + kvmppc_core_queue_data_storage(vcpu, ea, dsisr); + return RESUME_GUEST; + } + /* passthrough of emulated MMIO case... 
*/ + pr_err("emulated MMIO passthrough?\n"); + return -EINVAL; + } + if (memslot->flags & KVM_MEM_READONLY) { + if (writing) { + /* Give the guest a DSI */ + kvmppc_core_queue_data_storage(vcpu, ea, + DSISR_ISSTORE | DSISR_PROTFAULT); + return RESUME_GUEST; + } + kvm_ro = true; + } + + /* 2. Find the host pte for this L1 guest real address */ + + /* Used to check for invalidations in progress */ + mmu_seq = kvm->mmu_notifier_seq; + smp_rmb(); + + /* See if can find translation in our partition scoped tables for L1 */ + pte = __pte(0); + spin_lock(&kvm->mmu_lock); + pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); + if (!shift) + shift = PAGE_SHIFT; + if (pte_p) + pte = *pte_p; + spin_unlock(&kvm->mmu_lock); + + if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) { + /* No suitable pte found -> try to insert a mapping */ + ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, + writing, kvm_ro, &pte, &level); + if (ret == -EAGAIN) + return RESUME_GUEST; + else if (ret) + return ret; + shift = kvmppc_radix_level_to_shift(level); + } + + /* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */ + + /* The permissions is the combination of the host and l1 guest ptes */ + perm |= gpte.may_read ? 0UL : _PAGE_READ; + perm |= gpte.may_write ? 0UL : _PAGE_WRITE; + perm |= gpte.may_execute ? 0UL : _PAGE_EXEC; + pte = __pte(pte_val(pte) & ~perm); + + /* What size pte can we insert? */ + if (shift > l1_shift) { + u64 mask; + unsigned int actual_shift = PAGE_SHIFT; + if (PMD_SHIFT < l1_shift) + actual_shift = PMD_SHIFT; + mask = (1UL << shift) - (1UL << actual_shift); + pte = __pte(pte_val(pte) | (gpa & mask)); + shift = actual_shift; + } + level = kvmppc_radix_shift_to_level(shift); + n_gpa &= ~((1UL << shift) - 1); + + /* 4. Insert the pte into our shadow_pgtable */ + + ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level, + mmu_seq, gp->shadow_lpid); + if (ret == -EAGAIN) + ret = RESUME_GUEST; /* Let the guest try again */ + + return ret; + + inval: + kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL); + return RESUME_GUEST; +} + +long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu) +{ + struct kvm_nested_guest *gp = vcpu->arch.nested; + long int ret; + + mutex_lock(&gp->tlb_lock); + ret = __kvmhv_nested_page_fault(vcpu, gp); + mutex_unlock(&gp->tlb_lock); + return ret; +} diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index fef3e1eb3a19..4c4dfc473800 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -830,6 +830,15 @@ void radix__flush_pwc_lpid(unsigned int lpid) } EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid); +/* + * Flush partition scoped translations from LPID (=LPIDR) + */ +void radix__flush_tlb_lpid(unsigned int lpid) +{ + _tlbie_lpid(lpid, RIC_FLUSH_ALL); +} +EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid); + /* * Flush partition scoped translations from LPID (=LPIDR) */ From 8cf531ed48cfc76f370369a372802a65361df27c Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Mon, 8 Oct 2018 16:31:08 +1100 Subject: [PATCH 083/204] KVM: PPC: Book3S HV: Introduce rmap to track nested guest mappings When a host (L0) page which is mapped into a (L1) guest is in turn mapped through to a nested (L2) guest we keep a reverse mapping (rmap) so that these mappings can be retrieved later. Whenever we create an entry in a shadow_pgtable for a nested guest we create a corresponding rmap entry and add it to the list for the L1 guest memslot at the index of the L1 guest page it maps. 
This means at the L1 guest memslot we end up with lists of rmaps. When we are notified of a host page being invalidated which has been mapped through to a (L1) guest, we can then walk the rmap list for that guest page, and find and invalidate all of the corresponding shadow_pgtable entries. In order to reduce memory consumption, we compress the information for each rmap entry down to 52 bits -- 12 bits for the LPID and 40 bits for the guest real page frame number -- which will fit in a single unsigned long. To avoid a scenario where a guest can trigger unbounded memory allocations, we scan the list when adding an entry to see if there is already an entry with the contents we need. This can occur, because we don't ever remove entries from the middle of a list. A struct nested guest rmap is a list pointer and an rmap entry; ---------------- | next pointer | ---------------- | rmap entry | ---------------- Thus the rmap pointer for each guest frame number in the memslot can be either NULL, a single entry, or a pointer to a list of nested rmap entries. gfn memslot rmap array ------------------------- 0 | NULL | (no rmap entry) ------------------------- 1 | single rmap entry | (rmap entry with low bit set) ------------------------- 2 | list head pointer | (list of rmap entries) ------------------------- The final entry always has the lowest bit set and is stored in the next pointer of the last list entry, or as a single rmap entry. With a list of rmap entries looking like; ----------------- ----------------- ------------------------- | list head ptr | ----> | next pointer | ----> | single rmap entry | ----------------- ----------------- ------------------------- | rmap entry | | rmap entry | ----------------- ------------------------- Signed-off-by: Suraj Jitindar Singh Signed-off-by: Paul Mackerras Reviewed-by: David Gibson Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_book3s.h | 3 + arch/powerpc/include/asm/kvm_book3s_64.h | 69 +++++++++++- arch/powerpc/kvm/book3s_64_mmu_radix.c | 44 +++++--- arch/powerpc/kvm/book3s_hv.c | 1 + arch/powerpc/kvm/book3s_hv_nested.c | 138 ++++++++++++++++++++++- 5 files changed, 240 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 63f7ccfac174..d7aeb6f701a6 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -196,6 +196,9 @@ extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, int table_index, u64 *pte_ret_p); extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite); +extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, + unsigned int shift, struct kvm_memory_slot *memslot, + unsigned int lpid); extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing, unsigned long gpa, unsigned int lpid); diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 5496152f70e1..c2a9146ee016 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -53,6 +53,66 @@ struct kvm_nested_guest { struct kvm_nested_guest *next; }; +/* + * We define a nested rmap entry as a single 64-bit quantity + * 0xFFF0000000000000 12-bit lpid field + * 0x000FFFFFFFFFF000 40-bit guest 4k page frame number + * 0x0000000000000001 1-bit single entry flag + */ +#define RMAP_NESTED_LPID_MASK 0xFFF0000000000000UL +#define 
RMAP_NESTED_LPID_SHIFT (52) +#define RMAP_NESTED_GPA_MASK 0x000FFFFFFFFFF000UL +#define RMAP_NESTED_IS_SINGLE_ENTRY 0x0000000000000001UL + +/* Structure for a nested guest rmap entry */ +struct rmap_nested { + struct llist_node list; + u64 rmap; +}; + +/* + * for_each_nest_rmap_safe - iterate over the list of nested rmap entries + * safe against removal of the list entry or NULL list + * @pos: a (struct rmap_nested *) to use as a loop cursor + * @node: pointer to the first entry + * NOTE: this can be NULL + * @rmapp: an (unsigned long *) in which to return the rmap entries on each + * iteration + * NOTE: this must point to already allocated memory + * + * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the + * rmap entry in the memslot. The list is always terminated by a "single entry" + * stored in the list element of the final entry of the llist. If there is ONLY + * a single entry then this is itself in the rmap entry of the memslot, not a + * llist head pointer. + * + * Note that the iterator below assumes that a nested rmap entry is always + * non-zero. This is true for our usage because the LPID field is always + * non-zero (zero is reserved for the host). + * + * This should be used to iterate over the list of rmap_nested entries with + * processing done on the u64 rmap value given by each iteration. This is safe + * against removal of list entries and it is always safe to call free on (pos). + * + * e.g. + * struct rmap_nested *cursor; + * struct llist_node *first; + * unsigned long rmap; + * for_each_nest_rmap_safe(cursor, first, &rmap) { + * do_something(rmap); + * free(cursor); + * } + */ +#define for_each_nest_rmap_safe(pos, node, rmapp) \ + for ((pos) = llist_entry((node), typeof(*(pos)), list); \ + (node) && \ + (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? \ + ((u64) (node)) : ((pos)->rmap))) && \ + (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? 
\ + ((struct llist_node *) ((pos) = NULL)) : \ + (pos)->list.next)), true); \ + (pos) = llist_entry((node), typeof(*(pos)), list)) + struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, bool create); void kvmhv_put_nested(struct kvm_nested_guest *gp); @@ -551,7 +611,14 @@ static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu) extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, unsigned long gpa, unsigned int level, - unsigned long mmu_seq, unsigned int lpid); + unsigned long mmu_seq, unsigned int lpid, + unsigned long *rmapp, struct rmap_nested **n_rmap); +extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp, + struct rmap_nested **n_rmap); +extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm, + struct kvm_memory_slot *memslot, + unsigned long gpa, unsigned long hpa, + unsigned long nbytes); #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index c4b1a9e1e3ff..4c1eccb20190 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -256,27 +256,38 @@ static void kvmppc_pmd_free(pmd_t *pmdp) kmem_cache_free(kvm_pmd_cache, pmdp); } -void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, - unsigned long gpa, unsigned int shift, - struct kvm_memory_slot *memslot, +/* Called with kvm->mmu_lock held */ +void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, + unsigned int shift, struct kvm_memory_slot *memslot, unsigned int lpid) { unsigned long old; + unsigned long gfn = gpa >> PAGE_SHIFT; + unsigned long page_size = PAGE_SIZE; + unsigned long hpa; old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift); kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid); - if ((old & _PAGE_DIRTY) && (lpid == kvm->arch.lpid)) { - unsigned long gfn = gpa >> PAGE_SHIFT; - unsigned long page_size = PAGE_SIZE; - if (shift) - page_size = 1ul << shift; + /* The following only applies to L1 entries */ + if (lpid != kvm->arch.lpid) + return; + + if (!memslot) { + memslot = gfn_to_memslot(kvm, gfn); if (!memslot) - memslot = gfn_to_memslot(kvm, gfn); - if (memslot && memslot->dirty_bitmap) - kvmppc_update_dirty_map(memslot, gfn, page_size); + return; } + if (shift) + page_size = 1ul << shift; + + gpa &= ~(page_size - 1); + hpa = old & PTE_RPN_MASK; + kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size); + + if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) + kvmppc_update_dirty_map(memslot, gfn, page_size); } /* @@ -430,7 +441,8 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, unsigned long gpa, unsigned int level, - unsigned long mmu_seq, unsigned int lpid) + unsigned long mmu_seq, unsigned int lpid, + unsigned long *rmapp, struct rmap_nested **n_rmap) { pgd_t *pgd; pud_t *pud, *new_pud = NULL; @@ -509,6 +521,8 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid); } kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte); + if (rmapp && n_rmap) + kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); ret = 0; goto out_unlock; } @@ -559,6 +573,8 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid); } kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte); + if (rmapp && n_rmap) + kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); ret = 0; goto out_unlock; } @@ -583,6 +599,8 @@ int 
kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, goto out_unlock; } kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte); + if (rmapp && n_rmap) + kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); ret = 0; out_unlock: @@ -710,7 +728,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, /* Allocate space in the tree and write the PTE */ ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level, - mmu_seq, kvm->arch.lpid); + mmu_seq, kvm->arch.lpid, NULL, NULL); if (inserted_pte) *inserted_pte = pte; if (levelp) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index a87912508f63..6e69b4de9a9a 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4486,6 +4486,7 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm) kvmppc_free_hpt(&kvm->arch.hpt); kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR, LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR); + kvmppc_rmap_reset(kvm); kvm->arch.radix = 1; return 0; } diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 21a210c134af..3fa676b2acd9 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -22,6 +23,7 @@ static struct patb_entry *pseries_partition_tb; static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp); +static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free); void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) { @@ -456,6 +458,8 @@ void kvmhv_release_all_nested(struct kvm *kvm) int i; struct kvm_nested_guest *gp; struct kvm_nested_guest *freelist = NULL; + struct kvm_memory_slot *memslot; + int srcu_idx; spin_lock(&kvm->mmu_lock); for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { @@ -474,6 +478,11 @@ void kvmhv_release_all_nested(struct kvm *kvm) freelist = gp->next; kvmhv_release_nested(gp); } + + srcu_idx = srcu_read_lock(&kvm->srcu); + kvm_for_each_memslot(memslot, kvm_memslots(kvm)) + kvmhv_free_memslot_nest_rmap(memslot); + srcu_read_unlock(&kvm->srcu, srcu_idx); } /* caller must hold gp->tlb_lock */ @@ -544,6 +553,123 @@ void kvmhv_put_nested(struct kvm_nested_guest *gp) kvmhv_release_nested(gp); } +static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid) +{ + if (lpid > kvm->arch.max_nested_lpid) + return NULL; + return kvm->arch.nested_guests[lpid]; +} + +static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2) +{ + return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK | + RMAP_NESTED_GPA_MASK)); +} + +void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp, + struct rmap_nested **n_rmap) +{ + struct llist_node *entry = ((struct llist_head *) rmapp)->first; + struct rmap_nested *cursor; + u64 rmap, new_rmap = (*n_rmap)->rmap; + + /* Are there any existing entries? */ + if (!(*rmapp)) { + /* No -> use the rmap as a single entry */ + *rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY; + return; + } + + /* Do any entries match what we're trying to insert? */ + for_each_nest_rmap_safe(cursor, entry, &rmap) { + if (kvmhv_n_rmap_is_equal(rmap, new_rmap)) + return; + } + + /* Do we need to create a list or just add the new entry? 
*/ + rmap = *rmapp; + if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */ + *rmapp = 0UL; + llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp); + if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */ + (*n_rmap)->list.next = (struct llist_node *) rmap; + + /* Set NULL so not freed by caller */ + *n_rmap = NULL; +} + +static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap, + unsigned long hpa, unsigned long mask) +{ + struct kvm_nested_guest *gp; + unsigned long gpa; + unsigned int shift, lpid; + pte_t *ptep; + + gpa = n_rmap & RMAP_NESTED_GPA_MASK; + lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT; + gp = kvmhv_find_nested(kvm, lpid); + if (!gp) + return; + + /* Find and invalidate the pte */ + ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift); + /* Don't spuriously invalidate ptes if the pfn has changed */ + if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) + kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid); +} + +static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp, + unsigned long hpa, unsigned long mask) +{ + struct llist_node *entry = llist_del_all((struct llist_head *) rmapp); + struct rmap_nested *cursor; + unsigned long rmap; + + for_each_nest_rmap_safe(cursor, entry, &rmap) { + kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask); + kfree(cursor); + } +} + +/* called with kvm->mmu_lock held */ +void kvmhv_remove_nest_rmap_range(struct kvm *kvm, + struct kvm_memory_slot *memslot, + unsigned long gpa, unsigned long hpa, + unsigned long nbytes) +{ + unsigned long gfn, end_gfn; + unsigned long addr_mask; + + if (!memslot) + return; + gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn; + end_gfn = gfn + (nbytes >> PAGE_SHIFT); + + addr_mask = PTE_RPN_MASK & ~(nbytes - 1); + hpa &= addr_mask; + + for (; gfn < end_gfn; gfn++) { + unsigned long *rmap = &memslot->arch.rmap[gfn]; + kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask); + } +} + +static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free) +{ + unsigned long page; + + for (page = 0; page < free->npages; page++) { + unsigned long rmap, *rmapp = &free->arch.rmap[page]; + struct rmap_nested *cursor; + struct llist_node *entry; + + entry = llist_del_all((struct llist_head *) rmapp); + for_each_nest_rmap_safe(cursor, entry, &rmap) + kfree(cursor); + } +} + static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu, struct kvm_nested_guest *gp, long gpa, int *shift_ret) @@ -695,11 +821,13 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, { struct kvm *kvm = vcpu->kvm; struct kvm_memory_slot *memslot; + struct rmap_nested *n_rmap; struct kvmppc_pte gpte; pte_t pte, *pte_p; unsigned long mmu_seq; unsigned long dsisr = vcpu->arch.fault_dsisr; unsigned long ea = vcpu->arch.fault_dar; + unsigned long *rmapp; unsigned long n_gpa, gpa, gfn, perm = 0UL; unsigned int shift, l1_shift, level; bool writing = !!(dsisr & DSISR_ISSTORE); @@ -833,8 +961,16 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, /* 4. 
Insert the pte into our shadow_pgtable */ + n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL); + if (!n_rmap) + return RESUME_GUEST; /* Let the guest try again */ + n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) | + (((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT); + rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level, - mmu_seq, gp->shadow_lpid); + mmu_seq, gp->shadow_lpid, rmapp, &n_rmap); + if (n_rmap) + kfree(n_rmap); if (ret == -EAGAIN) ret = RESUME_GUEST; /* Let the guest try again */ From e3b6b4661527e821ffbe3db83952fdb1e6e47c49 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Mon, 8 Oct 2018 16:31:09 +1100 Subject: [PATCH 084/204] KVM: PPC: Book3S HV: Implement H_TLB_INVALIDATE hcall When running a nested (L2) guest the guest (L1) hypervisor will use the H_TLB_INVALIDATE hcall when it needs to change the partition scoped page tables or the partition table which it manages. It will use this hcall in the situations where it would use a partition-scoped tlbie instruction if it were running in hypervisor mode. The H_TLB_INVALIDATE hcall can invalidate different scopes: Invalidate TLB for a given target address: - This invalidates a single L2 -> L1 pte - We need to invalidate any L2 -> L0 shadow_pgtable ptes which map the L2 address space which is being invalidated. This is because a single L2 -> L1 pte may have been mapped with more than one pte in the L2 -> L0 page tables. Invalidate the entire TLB for a given LPID or for all LPIDs: - Invalidate the entire shadow_pgtable for a given nested guest, or for all nested guests. Invalidate the PWC (page walk cache) for a given LPID or for all LPIDs: - We don't cache the PWC, so nothing to do. Invalidate the entire TLB, PWC and partition table for a given/all LPIDs: - Here we re-read the partition table entry and remove the nested state for any nested guest for which the first doubleword of the partition table entry is now zero. The H_TLB_INVALIDATE hcall takes as parameters the tlbie instruction word (of which only the RIC, PRS and R fields are used), the rS value (giving the lpid, where required) and the rB value (giving the IS, AP and EPN values). [paulus@ozlabs.org - adapted to having the partition table in guest memory, added the H_TLB_INVALIDATE implementation, removed tlbie instruction emulation, reworded the commit message.] 
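
[Editor's note: the hcall's first parameter is the tlbie instruction image and the second and third are the rS and rB values, so the handler is essentially a field decoder followed by a scope switch. The standalone sketch below models just that decode step; the bit positions follow the helpers added in this patch, while the enum and function names are illustrative rather than the kernel's.]

#include <stdio.h>

/* Illustrative invalidation scopes; the kernel acts on these directly
 * rather than returning an enum. */
enum tlbie_scope {
	SCOPE_INVALID,
	SCOPE_ONE_ADDR,     /* one L2 address -> unmap matching shadow ptes */
	SCOPE_ONE_LPID,     /* whole TLB / PWC / ptbl cache for one LPID    */
	SCOPE_ALL_LPIDS,    /* the same, for every nested LPID              */
};

/* Field extraction, following the bit positions used by the patch. */
static int get_ric(unsigned int instr) { return (instr >> 18) & 0x3; }
static int get_prs(unsigned int instr) { return (instr >> 17) & 0x1; }
static int get_r(unsigned int instr)   { return (instr >> 16) & 0x1; }
static int get_is(unsigned long rb)    { return (rb >> 10) & 0x3; }

static enum tlbie_scope decode_scope(unsigned int instr, unsigned long rb)
{
	int ric = get_ric(instr), prs = get_prs(instr), r = get_r(instr);
	int is = get_is(rb);

	/* Reject the combinations the handler treats as invalid:
	 * hash (r == 0), process-scoped (prs == 1), cluster bombs
	 * (ric == 3), is == 1, and ric 1/2 with is == 0. */
	if (!r || prs || ric == 3 || is == 1 ||
	    (!is && (ric == 1 || ric == 2)))
		return SCOPE_INVALID;

	switch (is) {
	case 0:  return SCOPE_ONE_ADDR;   /* ric is known to be 0 here */
	case 2:  return SCOPE_ONE_LPID;
	case 3:  return SCOPE_ALL_LPIDS;
	default: return SCOPE_INVALID;
	}
}

int main(void)
{
	/* r = 1, prs = 0, ric = 0; rb with is = 2 -> one LPID. */
	unsigned int instr = 1u << 16;
	unsigned long rb = 2ul << 10;

	printf("scope = %d\n", decode_scope(instr, rb));
	return 0;
}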
Reviewed-by: David Gibson Signed-off-by: Suraj Jitindar Singh Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/book3s/64/mmu-hash.h | 12 ++ arch/powerpc/include/asm/kvm_book3s.h | 1 + arch/powerpc/include/asm/ppc-opcode.h | 1 + arch/powerpc/kvm/book3s_emulate.c | 1 - arch/powerpc/kvm/book3s_hv.c | 3 + arch/powerpc/kvm/book3s_hv_nested.c | 196 +++++++++++++++++- 6 files changed, 212 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index b3520b549cba..66db23e2f4dc 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -203,6 +203,18 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) BUG(); } +static inline unsigned int ap_to_shift(unsigned long ap) +{ + int psize; + + for (psize = 0; psize < MMU_PAGE_COUNT; psize++) { + if (mmu_psize_defs[psize].ap == ap) + return mmu_psize_defs[psize].shift; + } + + return -1; +} + static inline unsigned long get_sllp_encoding(int psize) { unsigned long sllp; diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index d7aeb6f701a6..09f8e9ba69bc 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -301,6 +301,7 @@ long kvmhv_set_partition_table(struct kvm_vcpu *vcpu); void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1); void kvmhv_release_all_nested(struct kvm *kvm); long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu); +long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu); int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr); void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr); diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 665af14850e4..6093bc8f74e5 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -104,6 +104,7 @@ #define OP_31_XOP_LHZUX 311 #define OP_31_XOP_MSGSNDP 142 #define OP_31_XOP_MSGCLRP 174 +#define OP_31_XOP_TLBIE 306 #define OP_31_XOP_MFSPR 339 #define OP_31_XOP_LWAX 341 #define OP_31_XOP_LHAX 343 diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 2654df220d05..8c7e933e942e 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -36,7 +36,6 @@ #define OP_31_XOP_MTSR 210 #define OP_31_XOP_MTSRIN 242 #define OP_31_XOP_TLBIEL 274 -#define OP_31_XOP_TLBIE 306 /* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */ #define OP_31_XOP_FAKE_SC1 308 #define OP_31_XOP_SLBMTE 402 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 6e69b4de9a9a..2d7494140de7 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -974,6 +974,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) break; case H_TLB_INVALIDATE: ret = H_FUNCTION; + if (!vcpu->kvm->arch.nested_enable) + break; + ret = kvmhv_do_nested_tlbie(vcpu); break; default: diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 3fa676b2acd9..c83c13d707e2 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -486,7 +486,7 @@ void kvmhv_release_all_nested(struct kvm *kvm) } /* caller must hold gp->tlb_lock */ -void kvmhv_flush_nested(struct kvm_nested_guest *gp) +static void kvmhv_flush_nested(struct kvm_nested_guest 
*gp) { struct kvm *kvm = gp->l1_host; @@ -694,6 +694,200 @@ static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu, return ret; } +static inline int get_ric(unsigned int instr) +{ + return (instr >> 18) & 0x3; +} + +static inline int get_prs(unsigned int instr) +{ + return (instr >> 17) & 0x1; +} + +static inline int get_r(unsigned int instr) +{ + return (instr >> 16) & 0x1; +} + +static inline int get_lpid(unsigned long r_val) +{ + return r_val & 0xffffffff; +} + +static inline int get_is(unsigned long r_val) +{ + return (r_val >> 10) & 0x3; +} + +static inline int get_ap(unsigned long r_val) +{ + return (r_val >> 5) & 0x7; +} + +static inline long get_epn(unsigned long r_val) +{ + return r_val >> 12; +} + +static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid, + int ap, long epn) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_nested_guest *gp; + long npages; + int shift, shadow_shift; + unsigned long addr; + + shift = ap_to_shift(ap); + addr = epn << 12; + if (shift < 0) + /* Invalid ap encoding */ + return -EINVAL; + + addr &= ~((1UL << shift) - 1); + npages = 1UL << (shift - PAGE_SHIFT); + + gp = kvmhv_get_nested(kvm, lpid, false); + if (!gp) /* No such guest -> nothing to do */ + return 0; + mutex_lock(&gp->tlb_lock); + + /* There may be more than one host page backing this single guest pte */ + do { + kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift); + + npages -= 1UL << (shadow_shift - PAGE_SHIFT); + addr += 1UL << shadow_shift; + } while (npages > 0); + + mutex_unlock(&gp->tlb_lock); + kvmhv_put_nested(gp); + return 0; +} + +static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu, + struct kvm_nested_guest *gp, int ric) +{ + struct kvm *kvm = vcpu->kvm; + + mutex_lock(&gp->tlb_lock); + switch (ric) { + case 0: + /* Invalidate TLB */ + spin_lock(&kvm->mmu_lock); + kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, + gp->shadow_lpid); + radix__flush_tlb_lpid(gp->shadow_lpid); + spin_unlock(&kvm->mmu_lock); + break; + case 1: + /* + * Invalidate PWC + * We don't cache this -> nothing to do + */ + break; + case 2: + /* Invalidate TLB, PWC and caching of partition table entries */ + kvmhv_flush_nested(gp); + break; + default: + break; + } + mutex_unlock(&gp->tlb_lock); +} + +static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_nested_guest *gp; + int i; + + spin_lock(&kvm->mmu_lock); + for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { + gp = kvm->arch.nested_guests[i]; + if (gp) { + spin_unlock(&kvm->mmu_lock); + kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); + spin_lock(&kvm->mmu_lock); + } + } + spin_unlock(&kvm->mmu_lock); +} + +static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr, + unsigned long rsval, unsigned long rbval) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_nested_guest *gp; + int r, ric, prs, is, ap; + int lpid; + long epn; + int ret = 0; + + ric = get_ric(instr); + prs = get_prs(instr); + r = get_r(instr); + lpid = get_lpid(rsval); + is = get_is(rbval); + + /* + * These cases are invalid and are not handled: + * r != 1 -> Only radix supported + * prs == 1 -> Not HV privileged + * ric == 3 -> No cluster bombs for radix + * is == 1 -> Partition scoped translations not associated with pid + * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA + */ + if ((!r) || (prs) || (ric == 3) || (is == 1) || + ((!is) && (ric == 1 || ric == 2))) + return -EINVAL; + + switch (is) { + case 0: + /* + * We know ric == 0 + * Invalidate TLB for a given target 
address + */ + epn = get_epn(rbval); + ap = get_ap(rbval); + ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn); + break; + case 2: + /* Invalidate matching LPID */ + gp = kvmhv_get_nested(kvm, lpid, false); + if (gp) { + kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); + kvmhv_put_nested(gp); + } + break; + case 3: + /* Invalidate ALL LPIDs */ + kvmhv_emulate_tlbie_all_lpid(vcpu, ric); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +/* + * This handles the H_TLB_INVALIDATE hcall. + * Parameters are (r4) tlbie instruction code, (r5) rS contents, + * (r6) rB contents. + */ +long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu) +{ + int ret; + + ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6)); + if (ret) + return H_PARAMETER; + return H_SUCCESS; +} + /* Used to convert a nested guest real address to a L1 guest real address */ static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu, struct kvm_nested_guest *gp, From 690ed4cad87a34781a7f3c34a6cebda6cc2cc06c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:31:10 +1100 Subject: [PATCH 085/204] KVM: PPC: Book3S HV: Use hypercalls for TLB invalidation when nested This adds code to call the H_TLB_INVALIDATE hypercall when running as a guest, in the cases where we need to invalidate TLBs (or other MMU caches) as part of managing the mappings for a nested guest. Calling H_TLB_INVALIDATE lets the nested hypervisor inform the parent hypervisor about changes to partition-scoped page tables or the partition table without needing to do hypervisor-privileged tlbie instructions. Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_book3s_64.h | 5 ++++ arch/powerpc/kvm/book3s_64_mmu_radix.c | 30 ++++++++++++++++++++++-- arch/powerpc/kvm/book3s_hv_nested.c | 30 +++++++++++++++++++----- 3 files changed, 57 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index c2a9146ee016..719b31723567 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -24,6 +24,7 @@ #include #include #include +#include #ifdef CONFIG_PPC_PSERIES static inline bool kvmhv_on_pseries(void) @@ -117,6 +118,10 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, bool create); void kvmhv_put_nested(struct kvm_nested_guest *gp); +/* Encoding of first parameter for H_TLB_INVALIDATE */ +#define H_TLBIE_P1_ENC(ric, prs, r) (___PPC_RIC(ric) | ___PPC_PRS(prs) | \ + ___PPC_R(r)) + /* Power architecture requires HPT is at least 256kiB, at most 64TiB */ #define PPC_MIN_HPT_ORDER 18 #define PPC_MAX_HPT_ORDER 46 diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 4c1eccb20190..ae0e3edd94bc 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -201,17 +201,43 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, unsigned int pshift, unsigned int lpid) { unsigned long psize = PAGE_SIZE; + int psi; + long rc; + unsigned long rb; if (pshift) psize = 1UL << pshift; + else + pshift = PAGE_SHIFT; addr &= ~(psize - 1); - radix__flush_tlb_lpid_page(lpid, addr, psize); + + if (!kvmhv_on_pseries()) { + radix__flush_tlb_lpid_page(lpid, addr, psize); + return; + } + + psi = shift_to_mmu_psize(pshift); + rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58)); + rc = 
plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1), + lpid, rb); + if (rc) + pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc); } static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid) { - radix__flush_pwc_lpid(lpid); + long rc; + + if (!kvmhv_on_pseries()) { + radix__flush_pwc_lpid(lpid); + return; + } + + rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1), + lpid, TLBIEL_INVAL_SET_LPID); + if (rc) + pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc); } static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index c83c13d707e2..486d9007c288 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -299,14 +299,32 @@ void kvmhv_nested_exit(void) } } +static void kvmhv_flush_lpid(unsigned int lpid) +{ + long rc; + + if (!kvmhv_on_pseries()) { + radix__flush_tlb_lpid(lpid); + return; + } + + rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1), + lpid, TLBIEL_INVAL_SET_LPID); + if (rc) + pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc); +} + void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1) { - if (cpu_has_feature(CPU_FTR_HVMODE)) { + if (!kvmhv_on_pseries()) { mmu_partition_table_set_entry(lpid, dw0, dw1); - } else { - pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0); - pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1); + return; } + + pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0); + pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1); + /* L0 will do the necessary barriers */ + kvmhv_flush_lpid(lpid); } static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp) @@ -493,7 +511,7 @@ static void kvmhv_flush_nested(struct kvm_nested_guest *gp) spin_lock(&kvm->mmu_lock); kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid); spin_unlock(&kvm->mmu_lock); - radix__flush_tlb_lpid(gp->shadow_lpid); + kvmhv_flush_lpid(gp->shadow_lpid); kvmhv_update_ptbl_cache(gp); if (gp->l1_gr_to_hr == 0) kvmhv_remove_nested(gp); @@ -777,7 +795,7 @@ static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu, spin_lock(&kvm->mmu_lock); kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid); - radix__flush_tlb_lpid(gp->shadow_lpid); + kvmhv_flush_lpid(gp->shadow_lpid); spin_unlock(&kvm->mmu_lock); break; case 1: From 9d0b048da788c1df927d808bb60e7fd4f19a3a12 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Mon, 8 Oct 2018 16:31:11 +1100 Subject: [PATCH 086/204] KVM: PPC: Book3S HV: Invalidate TLB when nested vcpu moves physical cpu This is only done at level 0, since only level 0 knows which physical CPU a vcpu is running on. This does for nested guests what L0 already did for its own guests, which is to flush the TLB on a pCPU when it goes to run a vCPU there, and there is another vCPU in the same VM which previously ran on this pCPU and has now started to run on another pCPU. This is to handle the situation where the other vCPU touched a mapping, moved to another pCPU and did a tlbiel (local-only tlbie) on that new pCPU and thus left behind a stale TLB entry on this pCPU. This introduces a limit on the the vcpu_token values used in the H_ENTER_NESTED hcall -- they must now be less than NR_CPUS. [paulus@ozlabs.org - made prev_cpu array be short[] to reduce memory consumption.] 
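The decision the hunks below implement boils down to a per-vCPU record of the last physical core the nested vCPU ran on. A simplified sketch of that policy (the struct fields are the ones added by this patch, the helper itself is hypothetical, and the real code additionally IPIs sibling threads that are currently in the guest):

/*
 * Simplified sketch, not the exact kernel code: remember the last physical
 * core each nested vCPU ran on; when it shows up on a different core, mark
 * the old core as needing a TLB flush before it next enters this guest.
 */
static void note_nested_vcpu_move(struct kvm_nested_guest *gp,
				  int vcpu_token, int pcpu)
{
	int prev = gp->prev_cpu[vcpu_token];	/* -1 if the vCPU never ran */

	if (prev >= 0 &&
	    cpu_first_thread_sibling(prev) != cpu_first_thread_sibling(pcpu))
		cpumask_set_cpu(cpu_first_thread_sibling(prev),
				&gp->need_tlb_flush);
	gp->prev_cpu[vcpu_token] = pcpu;
}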
Reviewed-by: David Gibson Signed-off-by: Suraj Jitindar Singh Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_book3s_64.h | 3 + arch/powerpc/kvm/book3s_hv.c | 101 ++++++++++++++--------- arch/powerpc/kvm/book3s_hv_nested.c | 5 ++ 3 files changed, 71 insertions(+), 38 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 719b31723567..83d4def5f1ed 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -52,6 +52,9 @@ struct kvm_nested_guest { long refcnt; /* number of pointers to this struct */ struct mutex tlb_lock; /* serialize page faults and tlbies */ struct kvm_nested_guest *next; + cpumask_t need_tlb_flush; + cpumask_t cpu_in_guest; + short prev_cpu[NR_CPUS]; }; /* diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 2d7494140de7..f8c14e86a4ab 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2397,10 +2397,18 @@ static void kvmppc_release_hwthread(int cpu) static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu) { + struct kvm_nested_guest *nested = vcpu->arch.nested; + cpumask_t *cpu_in_guest; int i; cpu = cpu_first_thread_sibling(cpu); - cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush); + if (nested) { + cpumask_set_cpu(cpu, &nested->need_tlb_flush); + cpu_in_guest = &nested->cpu_in_guest; + } else { + cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush); + cpu_in_guest = &kvm->arch.cpu_in_guest; + } /* * Make sure setting of bit in need_tlb_flush precedes * testing of cpu_in_guest bits. The matching barrier on @@ -2408,13 +2416,23 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu) */ smp_mb(); for (i = 0; i < threads_per_core; ++i) - if (cpumask_test_cpu(cpu + i, &kvm->arch.cpu_in_guest)) + if (cpumask_test_cpu(cpu + i, cpu_in_guest)) smp_call_function_single(cpu + i, do_nothing, NULL, 1); } static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu) { + struct kvm_nested_guest *nested = vcpu->arch.nested; struct kvm *kvm = vcpu->kvm; + int prev_cpu; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return; + + if (nested) + prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; + else + prev_cpu = vcpu->arch.prev_cpu; /* * With radix, the guest can do TLB invalidations itself, @@ -2428,12 +2446,46 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu) * ran to flush the TLB. The TLB is shared between threads, * so we use a single bit in .need_tlb_flush for all 4 threads. 
*/ - if (vcpu->arch.prev_cpu != pcpu) { - if (vcpu->arch.prev_cpu >= 0 && - cpu_first_thread_sibling(vcpu->arch.prev_cpu) != + if (prev_cpu != pcpu) { + if (prev_cpu >= 0 && + cpu_first_thread_sibling(prev_cpu) != cpu_first_thread_sibling(pcpu)) - radix_flush_cpu(kvm, vcpu->arch.prev_cpu, vcpu); - vcpu->arch.prev_cpu = pcpu; + radix_flush_cpu(kvm, prev_cpu, vcpu); + if (nested) + nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu; + else + vcpu->arch.prev_cpu = pcpu; + } +} + +static void kvmppc_radix_check_need_tlb_flush(struct kvm *kvm, int pcpu, + struct kvm_nested_guest *nested) +{ + cpumask_t *need_tlb_flush; + int lpid; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return; + + if (cpu_has_feature(CPU_FTR_ARCH_300)) + pcpu &= ~0x3UL; + + if (nested) { + lpid = nested->shadow_lpid; + need_tlb_flush = &nested->need_tlb_flush; + } else { + lpid = kvm->arch.lpid; + need_tlb_flush = &kvm->arch.need_tlb_flush; + } + + mtspr(SPRN_LPID, lpid); + isync(); + smp_mb(); + + if (cpumask_test_cpu(pcpu, need_tlb_flush)) { + radix__local_flush_tlb_lpid_guest(lpid); + /* Clear the bit after the TLB flush */ + cpumask_clear_cpu(pcpu, need_tlb_flush); } } @@ -3127,8 +3179,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) spin_unlock(&core_info.vc[sub]->lock); if (kvm_is_radix(vc->kvm)) { - int tmp = pcpu; - /* * Do we need to flush the process scoped TLB for the LPAR? * @@ -3139,17 +3189,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) * * Hash must be flushed in realmode in order to use tlbiel. */ - mtspr(SPRN_LPID, vc->kvm->arch.lpid); - isync(); - - if (cpu_has_feature(CPU_FTR_ARCH_300)) - tmp &= ~0x3UL; - - if (cpumask_test_cpu(tmp, &vc->kvm->arch.need_tlb_flush)) { - radix__local_flush_tlb_lpid_guest(vc->kvm->arch.lpid); - /* Clear the bit after the TLB flush */ - cpumask_clear_cpu(tmp, &vc->kvm->arch.need_tlb_flush); - } + kvmppc_radix_check_need_tlb_flush(vc->kvm, pcpu, NULL); } /* @@ -3872,12 +3912,11 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr) { - int trap, r, pcpu, pcpu0; + int trap, r, pcpu; int srcu_idx; struct kvmppc_vcore *vc; struct kvm *kvm = vcpu->kvm; struct kvm_nested_guest *nested = vcpu->arch.nested; - unsigned long lpid; trace_kvmppc_run_vcpu_enter(vcpu); @@ -3950,22 +3989,8 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, vc->vcore_state = VCORE_RUNNING; trace_kvmppc_run_core(vc, 0); - lpid = vc->kvm->arch.lpid; - if (nested) - lpid = nested->shadow_lpid; - mtspr(SPRN_LPID, lpid); - isync(); - - /* See comment above in kvmppc_run_core() about this */ - pcpu0 = pcpu; - if (cpu_has_feature(CPU_FTR_ARCH_300)) - pcpu0 &= ~0x3UL; - - if (cpumask_test_cpu(pcpu0, &kvm->arch.need_tlb_flush)) { - radix__local_flush_tlb_lpid_guest(lpid); - /* Clear the bit after the TLB flush */ - cpumask_clear_cpu(pcpu0, &kvm->arch.need_tlb_flush); - } + if (cpu_has_feature(CPU_FTR_HVMODE)) + kvmppc_radix_check_need_tlb_flush(kvm, pcpu, nested); trace_hardirqs_on(); guest_enter_irqoff(); diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 486d9007c288..a876dc3c8e1f 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -168,6 +168,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) if (err) return H_PARAMETER; + if (l2_hv.vcpu_token >= NR_CPUS) + return H_PARAMETER; + /* translate lpid */ l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true); if (!l2) @@ -412,6 +415,8 @@ struct kvm_nested_guest 
*kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid) goto out_free2; gp->shadow_lpid = shadow_lpid; + memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu)); + return gp; out_free2: From f3c99f97a3cda284418af6e242e06b351b83f647 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:31:12 +1100 Subject: [PATCH 087/204] KVM: PPC: Book3S HV: Don't access HFSCR, LPIDR or LPCR when running nested When running as a nested hypervisor, this avoids reading hypervisor privileged registers (specifically HFSCR, LPIDR and LPCR) at startup; instead reasonable default values are used. This also avoids writing LPIDR in the single-vcpu entry/exit path. Also, this removes the check for CPU_FTR_HVMODE in kvmppc_mmu_hv_init() since its only caller already checks this. Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 7 +++--- arch/powerpc/kvm/book3s_hv.c | 33 ++++++++++++++++++----------- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 68e14afecac8..c615617e78ac 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -268,14 +268,13 @@ int kvmppc_mmu_hv_init(void) { unsigned long host_lpid, rsvd_lpid; - if (!cpu_has_feature(CPU_FTR_HVMODE)) - return -EINVAL; - if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE)) return -EINVAL; /* POWER7 has 10-bit LPIDs (12-bit in POWER8) */ - host_lpid = mfspr(SPRN_LPID); + host_lpid = 0; + if (cpu_has_feature(CPU_FTR_HVMODE)) + host_lpid = mfspr(SPRN_LPID); rsvd_lpid = LPID_RSVD; kvmppc_init_lpid(rsvd_lpid + 1); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index f8c14e86a4ab..c90deb5a5219 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2174,15 +2174,18 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, * Set the default HFSCR for the guest from the host value. * This value is only used on POWER9. * On POWER9, we want to virtualize the doorbell facility, so we - * turn off the HFSCR bit, which causes those instructions to trap. + * don't set the HFSCR_MSGP bit, and that causes those instructions + * to trap and then we emulate them. 
*/ - vcpu->arch.hfscr = mfspr(SPRN_HFSCR); - if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) + vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | + HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP; + if (cpu_has_feature(CPU_FTR_HVMODE)) { + vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); + if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) + vcpu->arch.hfscr |= HFSCR_TM; + } + if (cpu_has_feature(CPU_FTR_TM_COMP)) vcpu->arch.hfscr |= HFSCR_TM; - else if (!cpu_has_feature(CPU_FTR_TM_COMP)) - vcpu->arch.hfscr &= ~HFSCR_TM; - if (cpu_has_feature(CPU_FTR_ARCH_300)) - vcpu->arch.hfscr &= ~HFSCR_MSGP; kvmppc_mmu_book3s_hv_init(vcpu); @@ -4006,8 +4009,10 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, srcu_read_unlock(&kvm->srcu, srcu_idx); - mtspr(SPRN_LPID, kvm->arch.host_lpid); - isync(); + if (cpu_has_feature(CPU_FTR_HVMODE)) { + mtspr(SPRN_LPID, kvm->arch.host_lpid); + isync(); + } trace_hardirqs_off(); set_irq_happened(trap); @@ -4634,9 +4639,13 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); /* Init LPCR for virtual RMA mode */ - kvm->arch.host_lpid = mfspr(SPRN_LPID); - kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); - lpcr &= LPCR_PECE | LPCR_LPES; + if (cpu_has_feature(CPU_FTR_HVMODE)) { + kvm->arch.host_lpid = mfspr(SPRN_LPID); + kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); + lpcr &= LPCR_PECE | LPCR_LPES; + } else { + lpcr = 0; + } lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | LPCR_VPM0 | LPCR_VPM1; kvm->arch.vrma_slb_v = SLB_VSID_B_1T | From 3032341853daf07c723084a086ebe7a9bdf8f90b Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:31:13 +1100 Subject: [PATCH 088/204] KVM: PPC: Book3S HV: Add one-reg interface to virtual PTCR register This adds a one-reg register identifier which can be used to read and set the virtual PTCR for the guest. This register identifies the address and size of the virtual partition table for the guest, which contains information about the nested guests under this guest. Migrating this value is the only extra requirement for migrating a guest which has nested guests (assuming of course that the destination host supports nested virtualization in the kvm-hv module). Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- Documentation/virtual/kvm/api.txt | 1 + arch/powerpc/include/uapi/asm/kvm.h | 1 + arch/powerpc/kvm/book3s_hv.c | 6 ++++++ 3 files changed, 8 insertions(+) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index c664064f76fb..017d851b6c56 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -1922,6 +1922,7 @@ registers, find a list below: PPC | KVM_REG_PPC_TIDR | 64 PPC | KVM_REG_PPC_PSSCR | 64 PPC | KVM_REG_PPC_DEC_EXPIRY | 64 + PPC | KVM_REG_PPC_PTCR | 64 PPC | KVM_REG_PPC_TM_GPR0 | 64 ... 
PPC | KVM_REG_PPC_TM_GPR31 | 64 diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 1b32b56a03d3..8c876c166ef2 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -634,6 +634,7 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe) #define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf) +#define KVM_REG_PPC_PTCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc0) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index c90deb5a5219..8458bbe2a973 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1710,6 +1710,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_ONLINE: *val = get_reg_val(id, vcpu->arch.online); break; + case KVM_REG_PPC_PTCR: + *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); + break; default: r = -EINVAL; break; @@ -1941,6 +1944,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, atomic_dec(&vcpu->arch.vcore->online_count); vcpu->arch.online = i; break; + case KVM_REG_PPC_PTCR: + vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); + break; default: r = -EINVAL; break; From 73937deb4b2d7f08ac48e9125de889c757a7a9b3 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Mon, 8 Oct 2018 16:31:14 +1100 Subject: [PATCH 089/204] KVM: PPC: Book3S HV: Sanitise hv_regs on nested guest entry restore_hv_regs() is used to copy the hv_regs L1 wants to set to run the nested (L2) guest into the vcpu structure. We need to sanitise these values to ensure we don't let the L1 guest hypervisor do things we don't want it to. We don't let data address watchpoints or completed instruction address breakpoints be set to match in hypervisor state. We also don't let L1 enable features in the hypervisor facility status and control register (HFSCR) for L2 which we have disabled for L1. That is L2 will get the subset of features which the L0 hypervisor has enabled for L1 and the features L1 wants to enable for L2. This could mean we give L1 a hypervisor facility unavailable interrupt for a facility it thinks it has enabled, however it shouldn't have enabled a facility it itself doesn't have for the L2 guest. We sanitise the registers when copying in the L2 hv_regs. We don't need to sanitise when copying back the L1 hv_regs since these shouldn't be able to contain invalid values as they're just what was copied out. 
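The HFSCR part of the sanitisation is a plain bitwise intersection. A worked example with hypothetical facility masks (not taken from any real configuration), using the formula added by this patch:

/*
 * Hypothetical example: L0 granted L1 only TAR/EBB/FP/VSX, and L1 asks for
 * those plus TM for its L2.  The interrupt-cause byte is always preserved
 * so facility-unavailable interrupts are still reported correctly.
 */
u64 l1_hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_FP | HFSCR_VECVSX; /* vcpu->arch.hfscr */
u64 l2_req   = l1_hfscr | HFSCR_TM;                             /* hr->hfscr from L1 */
u64 l2_eff   = l2_req & (HFSCR_INTR_CAUSE | l1_hfscr);
/* l2_eff == l1_hfscr: the TM bit L1 tried to grant is silently dropped */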
Reviewed-by: David Gibson Signed-off-by: Suraj Jitindar Singh Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/reg.h | 1 + arch/powerpc/kvm/book3s_hv_nested.c | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 6fda746ba328..c90698972f42 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -415,6 +415,7 @@ #define HFSCR_DSCR __MASK(FSCR_DSCR_LG) #define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG) #define HFSCR_FP __MASK(FSCR_FP_LG) +#define HFSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */ #define SPRN_TAR 0x32f /* Target Address Register */ #define SPRN_LPCR 0x13E /* LPAR Control Register */ #define LPCR_VPM0 ASM_CONST(0x8000000000000000) diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index a876dc3c8e1f..e2305962fecf 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -86,6 +86,22 @@ static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap, } } +static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) +{ + /* + * Don't let L1 enable features for L2 which we've disabled for L1, + * but preserve the interrupt cause field. + */ + hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr); + + /* Don't let data address watchpoint match in hypervisor state */ + hr->dawrx0 &= ~DAWRX_HYP; + + /* Don't let completed instruction address breakpt match in HV state */ + if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) + hr->ciabr &= ~CIABR_PRIV; +} + static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) { struct kvmppc_vcore *vc = vcpu->arch.vcore; @@ -198,6 +214,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | LPCR_LPES | LPCR_MER; lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask); + sanitise_hv_regs(vcpu, &l2_hv); restore_hv_regs(vcpu, &l2_hv); vcpu->arch.ret = RESUME_GUEST; From 10b5022db7861a98f3d9a87eed8405f2d4e37ed6 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Mon, 8 Oct 2018 16:31:15 +1100 Subject: [PATCH 090/204] KVM: PPC: Book3S HV: Handle differing endianness for H_ENTER_NESTED The hcall H_ENTER_NESTED takes two parameters: the address in L1 guest memory of a hv_regs struct and the address of a pt_regs struct. The hcall requests the L0 hypervisor to use the register values in these structs to run a L2 guest and to return the exit state of the L2 guest in these structs. These are in the endianness of the L1 guest, rather than being always big-endian as is usually the case for PAPR hypercalls. This is convenient because it means that the L1 guest can pass the address of the regs field in its kvm_vcpu_arch struct. This also improves performance slightly by avoiding the need for two copies of the pt_regs struct. When reading/writing these structures, this patch handles the case where the endianness of the L1 guest differs from that of the L0 hypervisor, by byteswapping the structures after reading and before writing them back. Since all the fields of the pt_regs are of the same type, i.e., unsigned long, we treat it as an array of unsigned longs. The fields of struct hv_guest_state are not all the same, so its fields are byteswapped individually. 
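Condensed, the flow is symmetric around the guest run. A sketch of that shape (simplified from the hunks below, not the exact code):

/*
 * Structures live in L1 memory in L1's endianness, so they are converted to
 * host order right after copy-in and converted back just before copy-out.
 */
kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv, sizeof(l2_hv));
if (kvmppc_need_byteswap(vcpu))
	byteswap_hv_regs(&l2_hv);	/* field by field: mixed sizes */
kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs, sizeof(l2_regs));
if (kvmppc_need_byteswap(vcpu))
	byteswap_pt_regs(&l2_regs);	/* uniform: swab64() every word */

/* ... enter and run the L2 guest ... */

if (kvmppc_need_byteswap(vcpu)) {
	byteswap_hv_regs(&l2_hv);
	byteswap_pt_regs(&l2_regs);
}
kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv, sizeof(l2_hv));
kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs, sizeof(l2_regs));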
Reviewed-by: David Gibson Signed-off-by: Suraj Jitindar Singh Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_hv_nested.c | 51 ++++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index e2305962fecf..3f21f785bf0a 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -51,6 +51,48 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) hr->ppr = vcpu->arch.ppr; } +static void byteswap_pt_regs(struct pt_regs *regs) +{ + unsigned long *addr = (unsigned long *) regs; + + for (; addr < ((unsigned long *) (regs + 1)); addr++) + *addr = swab64(*addr); +} + +static void byteswap_hv_regs(struct hv_guest_state *hr) +{ + hr->version = swab64(hr->version); + hr->lpid = swab32(hr->lpid); + hr->vcpu_token = swab32(hr->vcpu_token); + hr->lpcr = swab64(hr->lpcr); + hr->pcr = swab64(hr->pcr); + hr->amor = swab64(hr->amor); + hr->dpdes = swab64(hr->dpdes); + hr->hfscr = swab64(hr->hfscr); + hr->tb_offset = swab64(hr->tb_offset); + hr->dawr0 = swab64(hr->dawr0); + hr->dawrx0 = swab64(hr->dawrx0); + hr->ciabr = swab64(hr->ciabr); + hr->hdec_expiry = swab64(hr->hdec_expiry); + hr->purr = swab64(hr->purr); + hr->spurr = swab64(hr->spurr); + hr->ic = swab64(hr->ic); + hr->vtb = swab64(hr->vtb); + hr->hdar = swab64(hr->hdar); + hr->hdsisr = swab64(hr->hdsisr); + hr->heir = swab64(hr->heir); + hr->asdr = swab64(hr->asdr); + hr->srr0 = swab64(hr->srr0); + hr->srr1 = swab64(hr->srr1); + hr->sprg[0] = swab64(hr->sprg[0]); + hr->sprg[1] = swab64(hr->sprg[1]); + hr->sprg[2] = swab64(hr->sprg[2]); + hr->sprg[3] = swab64(hr->sprg[3]); + hr->pidr = swab64(hr->pidr); + hr->cfar = swab64(hr->cfar); + hr->ppr = swab64(hr->ppr); +} + static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap, struct hv_guest_state *hr) { @@ -175,6 +217,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) sizeof(struct hv_guest_state)); if (err) return H_PARAMETER; + if (kvmppc_need_byteswap(vcpu)) + byteswap_hv_regs(&l2_hv); if (l2_hv.version != HV_GUEST_STATE_VERSION) return H_P2; @@ -183,7 +227,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) sizeof(struct pt_regs)); if (err) return H_PARAMETER; - + if (kvmppc_need_byteswap(vcpu)) + byteswap_pt_regs(&l2_regs); if (l2_hv.vcpu_token >= NR_CPUS) return H_PARAMETER; @@ -255,6 +300,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) kvmhv_put_nested(l2); /* copy l2_hv_state and regs back to guest */ + if (kvmppc_need_byteswap(vcpu)) { + byteswap_hv_regs(&l2_hv); + byteswap_pt_regs(&l2_regs); + } err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv, sizeof(struct hv_guest_state)); if (err) From de760db4d9d7b3e735dbc7e4e3fa2c00909abe7c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:31:16 +1100 Subject: [PATCH 091/204] KVM: PPC: Book3S HV: Allow HV module to load without hypervisor mode With this, the KVM-HV module can be loaded in a guest running under KVM-HV, and if the hypervisor supports nested virtualization, this guest can now act as a nested hypervisor and run nested guests. This also adds some checks to inform userspace that HPT guests are not supported by nested hypervisors (by returning false for the KVM_CAP_PPC_MMU_HASH_V3 capability), and to prevent userspace from configuring a guest to use HPT mode. 
Signed-off-by: Paul Mackerras Reviewed-by: David Gibson Signed-off-by: Michael Ellerman --- arch/powerpc/kvm/book3s_hv.c | 16 ++++++++++++---- arch/powerpc/kvm/powerpc.c | 3 ++- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8458bbe2a973..e1ac69edf08a 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4811,11 +4811,15 @@ static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn, static int kvmppc_core_check_processor_compat_hv(void) { - if (!cpu_has_feature(CPU_FTR_HVMODE) || - !cpu_has_feature(CPU_FTR_ARCH_206)) - return -EIO; + if (cpu_has_feature(CPU_FTR_HVMODE) && + cpu_has_feature(CPU_FTR_ARCH_206)) + return 0; - return 0; + /* POWER9 in radix mode is capable of being a nested hypervisor. */ + if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) + return 0; + + return -EIO; } #ifdef CONFIG_KVM_XICS @@ -5133,6 +5137,10 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) if (radix && !radix_enabled()) return -EINVAL; + /* If we're a nested hypervisor, we currently only support radix */ + if (kvmhv_on_pseries() && !radix) + return -EINVAL; + mutex_lock(&kvm->lock); if (radix != kvm_is_radix(kvm)) { if (kvm->arch.mmu_ready) { diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index eba5756d5b41..1f4b128894a0 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -594,7 +594,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = !!(hv_enabled && radix_enabled()); break; case KVM_CAP_PPC_MMU_HASH_V3: - r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300)); + r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) && + cpu_has_feature(CPU_FTR_HVMODE)); break; #endif case KVM_CAP_SYNC_MMU: From 83a055104eaf89589582659737ff5bf6eed63ac4 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 16:31:17 +1100 Subject: [PATCH 092/204] KVM: PPC: Book3S HV: Add nested shadow page tables to debugfs This adds a list of valid shadow PTEs for each nested guest to the 'radix' file for the guest in debugfs. This can be useful for debugging. 
Reviewed-by: David Gibson Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_book3s_64.h | 1 + arch/powerpc/kvm/book3s_64_mmu_radix.c | 39 ++++++++++++++++++++++-- arch/powerpc/kvm/book3s_hv_nested.c | 15 +++++++++ 3 files changed, 52 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 83d4def5f1ed..6d298145d564 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -120,6 +120,7 @@ struct rmap_nested { struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, bool create); void kvmhv_put_nested(struct kvm_nested_guest *gp); +int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid); /* Encoding of first parameter for H_TLB_INVALIDATE */ #define H_TLBIE_P1_ENC(ric, prs, r) (___PPC_RIC(ric) | ___PPC_PRS(prs) | \ diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index ae0e3edd94bc..43b21e88c716 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -1002,6 +1002,7 @@ struct debugfs_radix_state { struct kvm *kvm; struct mutex mutex; unsigned long gpa; + int lpid; int chars_left; int buf_index; char buf[128]; @@ -1043,6 +1044,7 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf, struct kvm *kvm; unsigned long gpa; pgd_t *pgt; + struct kvm_nested_guest *nested; pgd_t pgd, *pgdp; pud_t pud, *pudp; pmd_t pmd, *pmdp; @@ -1077,10 +1079,39 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf, } gpa = p->gpa; - pgt = kvm->arch.pgtable; - while (len != 0 && gpa < RADIX_PGTABLE_RANGE) { + nested = NULL; + pgt = NULL; + while (len != 0 && p->lpid >= 0) { + if (gpa >= RADIX_PGTABLE_RANGE) { + gpa = 0; + pgt = NULL; + if (nested) { + kvmhv_put_nested(nested); + nested = NULL; + } + p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid); + p->hdr = 0; + if (p->lpid < 0) + break; + } + if (!pgt) { + if (p->lpid == 0) { + pgt = kvm->arch.pgtable; + } else { + nested = kvmhv_get_nested(kvm, p->lpid, false); + if (!nested) { + gpa = RADIX_PGTABLE_RANGE; + continue; + } + pgt = nested->shadow_pgtable; + } + } + n = 0; if (!p->hdr) { - n = scnprintf(p->buf, sizeof(p->buf), + if (p->lpid > 0) + n = scnprintf(p->buf, sizeof(p->buf), + "\nNested LPID %d: ", p->lpid); + n += scnprintf(p->buf + n, sizeof(p->buf) - n, "pgdir: %lx\n", (unsigned long)pgt); p->hdr = 1; goto copy; @@ -1146,6 +1177,8 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf, } } p->gpa = gpa; + if (nested) + kvmhv_put_nested(nested); out: mutex_unlock(&p->mutex); diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 3f21f785bf0a..401d2ecbebc5 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -1274,3 +1274,18 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu) mutex_unlock(&gp->tlb_lock); return ret; } + +int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid) +{ + int ret = -1; + + spin_lock(&kvm->mmu_lock); + while (++lpid <= kvm->arch.max_nested_lpid) { + if (kvm->arch.nested_guests[lpid]) { + ret = lpid; + break; + } + } + spin_unlock(&kvm->mmu_lock); + return ret; +} From aa069a996951f3e2e38437ef0316685a5893fc7e Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 21 Sep 2018 20:02:01 +1000 Subject: [PATCH 093/204] KVM: PPC: Book3S HV: Add a VM capability to enable nested virtualization With this, userspace can enable a KVM-HV 
guest to run nested guests under it. The administrator can control whether any nested guests can be run; setting the "nested" module parameter to false prevents any guests becoming nested hypervisors (that is, any attempt to enable the nested capability on a guest will fail). Guests which are already nested hypervisors will continue to be so. Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/api.txt | 14 +++++++++++ arch/powerpc/include/asm/kvm_ppc.h | 1 + arch/powerpc/kvm/book3s_hv.c | 39 +++++++++++++++++++++++------- arch/powerpc/kvm/powerpc.c | 12 +++++++++ include/uapi/linux/kvm.h | 1 + 5 files changed, 58 insertions(+), 9 deletions(-) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 2f5f9b743bff..fde48b6708f1 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -4532,6 +4532,20 @@ With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise, a #GP would be raised when the guest tries to access. Currently, this capability does not enable write permissions of this MSR for the guest. +7.16 KVM_CAP_PPC_NESTED_HV + +Architectures: ppc +Parameters: none +Returns: 0 on success, -EINVAL when the implementation doesn't support + nested-HV virtualization. + +HV-KVM on POWER9 and later systems allows for "nested-HV" +virtualization, which provides a way for a guest VM to run guests that +can run using the CPU's supervisor mode (privileged non-hypervisor +state). Enabling this capability on a VM depends on the CPU having +the necessary functionality and on the facility being enabled with a +kvm-hv module parameter. + 8. Other capabilities. ---------------------- diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 88362ccda549..9b89b1918dfc 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -325,6 +325,7 @@ struct kvmppc_ops { int (*set_smt_mode)(struct kvm *kvm, unsigned long mode, unsigned long flags); void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr); + int (*enable_nested)(struct kvm *kvm); }; extern struct kvmppc_ops *kvmppc_hv_ops; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index f3cdf51d0191..89bcf923d542 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -122,6 +122,16 @@ module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644); MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core"); #endif +/* If set, guests are allowed to create and control nested guests */ +static bool nested = true; +module_param(nested, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)"); + +static inline bool nesting_enabled(struct kvm *kvm) +{ + return kvm->arch.nested_enable && kvm_is_radix(kvm); +} + /* If set, the threads on each CPU core have to be in the same MMU mode */ static bool no_mixing_hpt_and_radix; @@ -963,12 +973,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) case H_SET_PARTITION_TABLE: ret = H_FUNCTION; - if (vcpu->kvm->arch.nested_enable) + if (nesting_enabled(vcpu->kvm)) ret = kvmhv_set_partition_table(vcpu); break; case H_ENTER_NESTED: ret = H_FUNCTION; - if (!vcpu->kvm->arch.nested_enable) + if (!nesting_enabled(vcpu->kvm)) break; ret = kvmhv_enter_nested_guest(vcpu); if (ret == H_INTERRUPT) { @@ -978,9 +988,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) break; case H_TLB_INVALIDATE: ret = H_FUNCTION; - if 
(!vcpu->kvm->arch.nested_enable) - break; - ret = kvmhv_do_nested_tlbie(vcpu); + if (nesting_enabled(vcpu->kvm)) + ret = kvmhv_do_nested_tlbie(vcpu); break; default: @@ -4508,10 +4517,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm) { - if (kvm->arch.nested_enable) { - kvm->arch.nested_enable = false; + if (nesting_enabled(kvm)) kvmhv_release_all_nested(kvm); - } kvmppc_free_radix(kvm); kvmppc_update_lpcr(kvm, LPCR_VPM1, LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR); @@ -4788,7 +4795,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) /* Perform global invalidation and return lpid to the pool */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { - if (kvm->arch.nested_enable) + if (nesting_enabled(kvm)) kvmhv_release_all_nested(kvm); kvm->arch.process_table = 0; kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); @@ -5181,6 +5188,19 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) return err; } +static int kvmhv_enable_nested(struct kvm *kvm) +{ + if (!nested) + return -EPERM; + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return -ENODEV; + + /* kvm == NULL means the caller is testing if the capability exists */ + if (kvm) + kvm->arch.nested_enable = true; + return 0; +} + static struct kvmppc_ops kvm_ops_hv = { .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, @@ -5220,6 +5240,7 @@ static struct kvmppc_ops kvm_ops_hv = { .configure_mmu = kvmhv_configure_mmu, .get_rmmu_info = kvmhv_get_rmmu_info, .set_smt_mode = kvmhv_set_smt_mode, + .enable_nested = kvmhv_enable_nested, }; static int kvm_init_subcore_bitmap(void) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 1f4b128894a0..2869a299c4ed 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -597,6 +597,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) && cpu_has_feature(CPU_FTR_HVMODE)); break; + case KVM_CAP_PPC_NESTED_HV: + r = !!(hv_enabled && kvmppc_hv_ops->enable_nested && + !kvmppc_hv_ops->enable_nested(NULL)); + break; #endif case KVM_CAP_SYNC_MMU: #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE @@ -2115,6 +2119,14 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); break; } + + case KVM_CAP_PPC_NESTED_HV: + r = -EINVAL; + if (!is_kvmppc_hv_enabled(kvm) || + !kvm->arch.kvm_ops->enable_nested) + break; + r = kvm->arch.kvm_ops->enable_nested(kvm); + break; #endif default: r = -EINVAL; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 251be353f950..d9cec6b5cb37 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -953,6 +953,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_NESTED_STATE 157 #define KVM_CAP_ARM_INJECT_SERROR_ESR 158 #define KVM_CAP_MSR_PLATFORM_INFO 159 +#define KVM_CAP_PPC_NESTED_HV 160 #ifdef KVM_CAP_IRQ_ROUTING From 901f8c3f6feb0225c14b3bc6237850fb921d2f2d Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 8 Oct 2018 14:24:30 +1100 Subject: [PATCH 094/204] KVM: PPC: Book3S HV: Add NO_HASH flag to GET_SMMU_INFO ioctl result This adds a KVM_PPC_NO_HASH flag to the flags field of the kvm_ppc_smmu_info struct, and arranges for it to be set when running as a nested hypervisor, as an unambiguous indication to userspace that HPT guests are not supported. 
Reporting the KVM_CAP_PPC_MMU_HASH_V3 capability as false could be taken as indicating only that the new HPT features in ISA V3.0 are not supported, leaving it ambiguous whether pre-V3.0 HPT features are supported. Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- Documentation/virtual/kvm/api.txt | 4 ++++ arch/powerpc/kvm/book3s_hv.c | 4 ++++ include/uapi/linux/kvm.h | 1 + 3 files changed, 9 insertions(+) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index fde48b6708f1..df98b6304769 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -2270,6 +2270,10 @@ The supported flags are: The emulated MMU supports 1T segments in addition to the standard 256M ones. + - KVM_PPC_NO_HASH + This flag indicates that HPT guests are not supported by KVM, + thus all guests must use radix MMU mode. + The "slb_size" field indicates how many SLB entries are supported The "sps" array contains 8 entries indicating the supported base diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 89bcf923d542..788bc61bd08c 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4257,6 +4257,10 @@ static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm, kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01); kvmppc_add_seg_page_size(&sps, 24, SLB_VSID_L); + /* If running as a nested hypervisor, we don't support HPT guests */ + if (kvmhv_on_pseries()) + info->flags |= KVM_PPC_NO_HASH; + return 0; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index d9cec6b5cb37..7f2ff3a76995 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -719,6 +719,7 @@ struct kvm_ppc_one_seg_page_size { #define KVM_PPC_PAGE_SIZES_REAL 0x00000001 #define KVM_PPC_1T_SEGMENTS 0x00000002 +#define KVM_PPC_NO_HASH 0x00000004 struct kvm_ppc_smmu_info { __u64 flags; From 3de6347bf96abead5c5257b43a58ca0a057df894 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 13 Jul 2018 08:42:30 -0700 Subject: [PATCH 095/204] KVM: vmx: rename KVM_GUEST_CR0_MASK tp KVM_VM_CR0_ALWAYS_OFF The KVM_GUEST_CR0_MASK macro tracks CR0 bits that are forced to zero by the VMX architecture, i.e. CR0.{NW,CD} must always be zero in the hardware CR0 post-VMXON. Rename the macro to clarify its purpose, be consistent with KVM_VM_CR0_ALWAYS_ON and avoid confusion with the CR0_GUEST_HOST_MASK field. 
Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 612fd17be635..022642da8c5c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -131,7 +131,7 @@ static bool __read_mostly enable_preemption_timer = 1; module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); #endif -#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD) +#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD) #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE #define KVM_VM_CR0_ALWAYS_ON \ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \ @@ -5260,7 +5260,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long hw_cr0; - hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK); + hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF); if (enable_unrestricted_guest) hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; else { From 1438921c6dc1bbdce800bfc7db04bd15cb260fc0 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 8 Oct 2018 23:42:17 +0300 Subject: [PATCH 096/204] KVM: nVMX: Flush TLB entries tagged by dest EPTP on L1<->L2 transitions If L1 and L2 share VPID (because L1 don't use VPID or we haven't allocated a vpid02), we need to flush TLB on L1<->L2 transitions. Before this patch, this TLB flushing was done by vmx_flush_tlb(). If L0 use EPT, this will translate into INVEPT(active_eptp); However, if L1 use EPT, in L1->L2 VMEntry, active EPTP is EPTP01 but TLB entries populated by L2 are tagged with EPTP02. Therefore we should delay vmx_flush_tlb() until active_eptp is EPTP02. To achieve this, instead of directly calling vmx_flush_tlb() we request it to be called by KVM_REQ_TLB_FLUSH which is evaluated after KVM_REQ_LOAD_CR3 which sets the active_eptp to EPTP02 as required. Similarly, on L2->L1 VMExit, active EPTP is EPTP02 but TLB entries populated by L1 are tagged with EPTP01 and therefore we should delay vmx_flush_tlb() until active_eptp is EPTP01. Reviewed-by: Mihai Carabas Reviewed-by: Darren Kenny Reviewed-by: Nikita Leshenko Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 022642da8c5c..a4fa15b8b1ae 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12290,7 +12290,15 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); } } else { - vmx_flush_tlb(vcpu, true); + /* + * If L1 use EPT, then L0 needs to execute INVEPT on + * EPTP02 instead of EPTP01. Therefore, delay TLB + * flush until vmcs02->eptp is fully updated by + * KVM_REQ_LOAD_CR3. Note that this assumes + * KVM_REQ_TLB_FLUSH is evaluated after + * KVM_REQ_LOAD_CR3 in vcpu_enter_guest(). + */ + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } } @@ -13188,10 +13196,14 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, * Therefore, flush TLB only in case vmcs01 uses VPID and * vmcs12 don't use VPID as in this case L1 & L2 TLB entries * are both tagged with vmx->vpid. + * + * If vmcs12 uses EPT, we need to execute this flush on EPTP01 + * and therefore we request the TLB flush to happen only after VMCS EPTP + * has been set by KVM_REQ_LOAD_CR3. 
*/ if (enable_vpid && !(nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02)) { - vmx_flush_tlb(vcpu, true); + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); From 3d5bdae8b16418781ec2c34a024aceee66267322 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 8 Oct 2018 23:42:18 +0300 Subject: [PATCH 097/204] KVM: nVMX: Use correct VPID02 when emulating L1 INVVPID In case L0 didn't allocate vmx->nested.vpid02 for L2, vmcs02->vpid is set to vmx->vpid. Consider this case when emulating L1 INVVPID in L0. Reviewed-by: Nikita Leshenko Reviewed-by: Mark Kanda Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a4fa15b8b1ae..12d39e2561be 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -9003,6 +9003,13 @@ static int handle_invept(struct kvm_vcpu *vcpu) return kvm_skip_emulated_instruction(vcpu); } +static u16 nested_get_vpid02(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid; +} + static int handle_invvpid(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -9014,6 +9021,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) u64 vpid; u64 gla; } operand; + u16 vpid02; if (!(vmx->nested.msrs.secondary_ctls_high & SECONDARY_EXEC_ENABLE_VPID) || @@ -9053,6 +9061,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) return kvm_skip_emulated_instruction(vcpu); } + vpid02 = nested_get_vpid02(vcpu); switch (type) { case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: if (!operand.vpid || @@ -9061,12 +9070,11 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } - if (cpu_has_vmx_invvpid_individual_addr() && - vmx->nested.vpid02) { + if (cpu_has_vmx_invvpid_individual_addr()) { __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, - vmx->nested.vpid02, operand.gla); + vpid02, operand.gla); } else - __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); + __vmx_flush_tlb(vcpu, vpid02, true); break; case VMX_VPID_EXTENT_SINGLE_CONTEXT: case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: @@ -9075,10 +9083,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } - __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); + __vmx_flush_tlb(vcpu, vpid02, true); break; case VMX_VPID_EXTENT_ALL_CONTEXT: - __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); + __vmx_flush_tlb(vcpu, vpid02, true); break; default: WARN_ON_ONCE(1); From 327c072187f7af4e4a371b635099f615c14082a7 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 8 Oct 2018 23:42:19 +0300 Subject: [PATCH 098/204] KVM: nVMX: Flush linear and combined mappings on VPID02 related flushes All VPID12s used on a given L1 vCPU is translated to a single VPID02 (vmx->nested.vpid02 or vmx->vpid). Therefore, on L1->L2 VMEntry, we need to invalidate linear and combined mappings tagged by VPID02 in case L1 uses VPID and vmcs12->vpid was changed since last L1->L2 VMEntry. However, current code invalidates the wrong mappings as it calls __vmx_flush_tlb() with invalidate_gpa parameter set to true which will result in invalidating combined and guest-physical mappings tagged with active EPTP which is EPTP01. Similarly, INVVPID emulation have the exact same issue. 
Fix both issues by just setting invalidate_gpa parameter to false which will result in invalidating linear and combined mappings tagged with given VPID02 as required. Reviewed-by: Nikita Leshenko Reviewed-by: Mark Kanda Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 12d39e2561be..9752f070108a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -9074,7 +9074,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid02, operand.gla); } else - __vmx_flush_tlb(vcpu, vpid02, true); + __vmx_flush_tlb(vcpu, vpid02, false); break; case VMX_VPID_EXTENT_SINGLE_CONTEXT: case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: @@ -9083,10 +9083,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } - __vmx_flush_tlb(vcpu, vpid02, true); + __vmx_flush_tlb(vcpu, vpid02, false); break; case VMX_VPID_EXTENT_ALL_CONTEXT: - __vmx_flush_tlb(vcpu, vpid02, true); + __vmx_flush_tlb(vcpu, vpid02, false); break; default: WARN_ON_ONCE(1); @@ -12295,7 +12295,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) { if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { vmx->nested.last_vpid = vmcs12->virtual_processor_id; - __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); + __vmx_flush_tlb(vcpu, vmx->nested.vpid02, false); } } else { /* From efebf0aaec3de4df2bdb2949ebaacacd82de37db Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 8 Oct 2018 23:42:20 +0300 Subject: [PATCH 099/204] KVM: nVMX: Do not flush TLB on L1<->L2 transitions if L1 uses VPID and EPT If L1 uses VPID, it expects TLB to not be flushed on L1<->L2 transitions. However, code currently flushes TLB nonetheless if we didn't allocate a vpid02 for L2. As in this case, vmcs02->vpid == vmcs01->vpid == vmx->vpid. But, if L1 uses EPT, TLB entires populated by L2 are tagged with EPTP02 while TLB entries populated by L1 are tagged with EPTP01. Therefore, we can also avoid TLB flush if L1 uses VPID and EPT. Reviewed-by: Mihai Carabas Reviewed-by: Darren Kenny Reviewed-by: Nikita Leshenko Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9752f070108a..b3c5517a89b3 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -11964,6 +11964,25 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne return 0; } +/* + * Returns if KVM is able to config CPU to tag TLB entries + * populated by L2 differently than TLB entries populated + * by L1. + * + * If L1 uses EPT, then TLB entries are tagged with different EPTP. + * + * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged + * with different VPID (L1 entries are tagged with vmx->vpid + * while L2 entries are tagged with vmx->nested.vpid02). 
+ */ +static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + + return nested_cpu_has_ept(vmcs12) || + (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02); +} + static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -12292,10 +12311,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * influence global bitmap(for vpid01 and vpid02 allocation) * even if spawn a lot of nested vCPUs. */ - if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) { + if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) { if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { vmx->nested.last_vpid = vmcs12->virtual_processor_id; - __vmx_flush_tlb(vcpu, vmx->nested.vpid02, false); + __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false); } } else { /* @@ -13194,23 +13213,21 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, load_vmcs12_mmu_host_state(vcpu, vmcs12); /* - * If vmcs01 don't use VPID, CPU flushes TLB on every + * If vmcs01 doesn't use VPID, CPU flushes TLB on every * VMEntry/VMExit. Thus, no need to flush TLB. * - * If vmcs12 uses VPID, TLB entries populated by L2 are - * tagged with vmx->nested.vpid02 while L1 entries are tagged - * with vmx->vpid. Thus, no need to flush TLB. + * If vmcs12 doesn't use VPID, L1 expects TLB to be + * flushed on every VMEntry/VMExit. * - * Therefore, flush TLB only in case vmcs01 uses VPID and - * vmcs12 don't use VPID as in this case L1 & L2 TLB entries - * are both tagged with vmx->vpid. + * Otherwise, we can preserve TLB entries as long as we are + * able to tag L1 TLB entries differently than L2 TLB entries. * * If vmcs12 uses EPT, we need to execute this flush on EPTP01 * and therefore we request the TLB flush to happen only after VMCS EPTP * has been set by KVM_REQ_LOAD_CR3. */ if (enable_vpid && - !(nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02)) { + (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) { kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } From 3b8a5df6c4dc6df2ab17d099fb157032f80bdca2 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Tue, 9 Oct 2018 09:02:08 +0800 Subject: [PATCH 100/204] KVM: LAPIC: Tune lapic_timer_advance_ns automatically MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In cloud environment, lapic_timer_advance_ns is needed to be tuned for every CPU generations, and every host kernel versions(the kvm-unit-tests/tscdeadline_latency.flat is 5700 cycles for upstream kernel and 9600 cycles for our 3.10 product kernel, both preemption_timer=N, Skylake server). This patch adds the capability to automatically tune lapic_timer_advance_ns step by step, the initial value is 1000ns as 'commit d0659d946be0 ("KVM: x86: add option to advance tscdeadline hrtimer expiration")' recommended, it will be reduced when it is too early, and increased when it is too late. The guest_tsc and tsc_deadline are hard to equal, so we assume we are done when the delta is within a small scope e.g. 100 cycles. This patch reduces latency (kvm-unit-tests/tscdeadline_latency, busy waits, preemption_timer enabled) from ~2600 cyles to ~1200 cyles on our Skylake server. 
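The tuning loop added below can be summarised in a few lines. A condensed sketch (not the exact code; the variables and constants are the ones this patch introduces):

/*
 * After each injected timer interrupt, step lapic_timer_advance_ns towards
 * the observed error, by at most 1/8 of its current value per step, and
 * stop adjusting once injection lands within ~100 TSC cycles of the
 * deadline.
 */
s64 delta = guest_tsc - tsc_deadline;	/* > 0: interrupt injected too late */

if (!lapic_timer_advance_adjust_done) {
	u64 ns = abs(delta) * 1000000ULL;
	unsigned int step;

	do_div(ns, vcpu->arch.virtual_tsc_khz);	/* TSC cycles -> nanoseconds */
	step = min((unsigned int)ns,
		   lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
	if (delta > 0)
		lapic_timer_advance_ns += step;	/* we were late: start earlier */
	else
		lapic_timer_advance_ns -= step;	/* we were early: start later */
	if (abs(delta) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
		lapic_timer_advance_adjust_done = true;
}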
Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Liran Alon Signed-off-by: Wanpeng Li Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 25 ++++++++++++++++++++++++- arch/x86/kvm/x86.c | 2 +- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index fbb0e6df121b..91ffb63397f5 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -70,6 +70,11 @@ #define APIC_BROADCAST 0xFF #define X2APIC_BROADCAST 0xFFFFFFFFul +static bool lapic_timer_advance_adjust_done = false; +#define LAPIC_TIMER_ADVANCE_ADJUST_DONE 100 +/* step-by-step approximation to mitigate fluctuation */ +#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8 + static inline int apic_test_vector(int vec, void *bitmap) { return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); @@ -1472,7 +1477,7 @@ static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu) void wait_lapic_expire(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; - u64 guest_tsc, tsc_deadline; + u64 guest_tsc, tsc_deadline, ns; if (!lapic_in_kernel(vcpu)) return; @@ -1492,6 +1497,24 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu) if (guest_tsc < tsc_deadline) __delay(min(tsc_deadline - guest_tsc, nsec_to_cycles(vcpu, lapic_timer_advance_ns))); + + if (!lapic_timer_advance_adjust_done) { + /* too early */ + if (guest_tsc < tsc_deadline) { + ns = (tsc_deadline - guest_tsc) * 1000000ULL; + do_div(ns, vcpu->arch.virtual_tsc_khz); + lapic_timer_advance_ns -= min((unsigned int)ns, + lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP); + } else { + /* too late */ + ns = (guest_tsc - tsc_deadline) * 1000000ULL; + do_div(ns, vcpu->arch.virtual_tsc_khz); + lapic_timer_advance_ns += min((unsigned int)ns, + lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP); + } + if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE) + lapic_timer_advance_adjust_done = true; + } } static void start_sw_tscdeadline(struct kvm_lapic *apic) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ca717737347e..1f3f95557703 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -136,7 +136,7 @@ static u32 __read_mostly tsc_tolerance_ppm = 250; module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); /* lapic timer advance (tscdeadline mode only) in nanoseconds */ -unsigned int __read_mostly lapic_timer_advance_ns = 0; +unsigned int __read_mostly lapic_timer_advance_ns = 1000; module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR); EXPORT_SYMBOL_GPL(lapic_timer_advance_ns); From 6c930268bcc4cef62194daf151097d2b94f67718 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 18 Sep 2018 19:54:24 +0200 Subject: [PATCH 101/204] kvm: selftests: vcpu_setup: set cr4.osfxsr Guest code may want to call functions that have variable arguments. To do so, we either need to compile with -mno-sse or enable SSE in the VCPUs. As it should be pretty safe to turn on the feature, and -mno-sse would make linking test code with standard libraries difficult, we choose the feature enabling. 
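To make the motivation concrete, here is a sketch of the kind of guest code this change enables; guest_sum() and its use of the GUEST_ASSERT/GUEST_DONE selftest helpers are hypothetical illustrations, not part of this patch. On x86-64, the prologue of a variadic function typically contains SSE spills of the XMM argument registers, and legacy SSE instructions raise #UD while CR4.OSFXSR is clear.

#include <stdarg.h>
#include <stdint.h>

#include "kvm_util.h"	/* GUEST_ASSERT()/GUEST_DONE() selftest helpers */

/*
 * Variadic guest helper: sums n 64-bit arguments. Built without -mno-sse,
 * the compiler-generated prologue may execute SSE stores, so the vCPU
 * needs CR4.OSFXSR set for this to run without faulting.
 */
static uint64_t guest_sum(int n, ...)
{
	va_list ap;
	uint64_t sum = 0;

	va_start(ap, n);
	while (n-- > 0)
		sum += va_arg(ap, uint64_t);
	va_end(ap);

	return sum;
}

static void guest_code(void)
{
	GUEST_ASSERT(guest_sum(3, 1ull, 2ull, 4ull) == 7);
	GUEST_DONE();
}

In a test, guest_code would be handed to vm_create_default() as usual; the only difference after this patch is that such guest code no longer requires special compiler flags.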
Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/lib/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/lib/x86.c b/tools/testing/selftests/kvm/lib/x86.c index a3122f1949a8..bb6c65ebfa77 100644 --- a/tools/testing/selftests/kvm/lib/x86.c +++ b/tools/testing/selftests/kvm/lib/x86.c @@ -626,7 +626,7 @@ void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot) switch (vm->mode) { case VM_MODE_FLAT48PG: sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG; - sregs.cr4 |= X86_CR4_PAE; + sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR; sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX); kvm_seg_set_unusable(&sregs.ldt); From 14c47b7530e2db1ab1d42ebbe99b2a58b8443ce7 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 18 Sep 2018 19:54:25 +0200 Subject: [PATCH 102/204] kvm: selftests: introduce ucall Rework the guest exit to userspace code to generalize the concept into what it is, a "hypercall to userspace", and provide two implementations of it: the PortIO version currently used, but only useable by x86, and an MMIO version that other architectures (except s390) can use. Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 2 +- .../selftests/kvm/cr4_cpuid_sync_test.c | 12 +- tools/testing/selftests/kvm/dirty_log_test.c | 5 +- .../testing/selftests/kvm/include/kvm_util.h | 68 +++++---- tools/testing/selftests/kvm/lib/kvm_util.c | 15 +- .../selftests/kvm/lib/kvm_util_internal.h | 1 + tools/testing/selftests/kvm/lib/ucall.c | 144 ++++++++++++++++++ .../selftests/kvm/platform_info_test.c | 12 +- tools/testing/selftests/kvm/state_test.c | 23 +-- .../selftests/kvm/vmx_tsc_adjust_test.c | 17 +-- 10 files changed, 218 insertions(+), 81 deletions(-) create mode 100644 tools/testing/selftests/kvm/lib/ucall.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index ec32dad3c3f0..08426f036353 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -3,7 +3,7 @@ all: top_srcdir = ../../../../ UNAME_M := $(shell uname -m) -LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c +LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c LIBKVM_x86_64 = lib/x86.c lib/vmx.c TEST_GEN_PROGS_x86_64 = platform_info_test diff --git a/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c index 11ec358bf969..fd4f419fe9ab 100644 --- a/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c +++ b/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c @@ -67,6 +67,7 @@ int main(int argc, char *argv[]) struct kvm_vm *vm; struct kvm_sregs sregs; struct kvm_cpuid_entry2 *entry; + struct ucall uc; int rc; entry = kvm_get_supported_cpuid_entry(1); @@ -87,21 +88,20 @@ int main(int argc, char *argv[]) rc = _vcpu_run(vm, VCPU_ID); if (run->exit_reason == KVM_EXIT_IO) { - switch (run->io.port) { - case GUEST_PORT_SYNC: + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_SYNC: /* emulate hypervisor clearing CR4.OSXSAVE */ vcpu_sregs_get(vm, VCPU_ID, &sregs); sregs.cr4 &= ~X86_CR4_OSXSAVE; vcpu_sregs_set(vm, VCPU_ID, &sregs); break; - case GUEST_PORT_ABORT: + case UCALL_ABORT: TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit."); break; - case GUEST_PORT_DONE: + case UCALL_DONE: goto done; default: - TEST_ASSERT(false, "Unknown port 0x%x.", - run->io.port); + TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd); 
} } } diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c index 0c2cdc105f96..7cf3e4ae6046 100644 --- a/tools/testing/selftests/kvm/dirty_log_test.c +++ b/tools/testing/selftests/kvm/dirty_log_test.c @@ -110,7 +110,7 @@ void *vcpu_worker(void *data) uint64_t loops, *guest_array, pages_count = 0; struct kvm_vm *vm = data; struct kvm_run *run; - struct guest_args args; + struct ucall uc; run = vcpu_state(vm, VCPU_ID); @@ -124,9 +124,8 @@ void *vcpu_worker(void *data) while (!READ_ONCE(host_quit)) { /* Let the guest to dirty these random pages */ ret = _vcpu_run(vm, VCPU_ID); - guest_args_read(vm, VCPU_ID, &args); if (run->exit_reason == KVM_EXIT_IO && - args.port == GUEST_PORT_SYNC) { + get_ucall(vm, VCPU_ID, &uc) == UCALL_SYNC) { pages_count += TEST_PAGES_PER_LOOP; generate_random_array(guest_array, TEST_PAGES_PER_LOOP); } else { diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 3acf9a91704c..22667dd48a9c 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -152,43 +152,49 @@ allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region); int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd); -#define GUEST_PORT_SYNC 0x1000 -#define GUEST_PORT_ABORT 0x1001 -#define GUEST_PORT_DONE 0x1002 +#define sync_global_to_guest(vm, g) ({ \ + typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ + memcpy(_p, &(g), sizeof(g)); \ +}) -static inline void __exit_to_l0(uint16_t port, uint64_t arg0, uint64_t arg1) -{ - __asm__ __volatile__("in %[port], %%al" - : - : [port]"d"(port), "D"(arg0), "S"(arg1) - : "rax"); -} +#define sync_global_from_guest(vm, g) ({ \ + typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ + memcpy(&(g), _p, sizeof(g)); \ +}) -/* - * Allows to pass three arguments to the host: port is 16bit wide, - * arg0 & arg1 are 64bit wide - */ -#define GUEST_SYNC_ARGS(_port, _arg0, _arg1) \ - __exit_to_l0(_port, (uint64_t) (_arg0), (uint64_t) (_arg1)) +/* ucall implementation types */ +typedef enum { + UCALL_PIO, + UCALL_MMIO, +} ucall_type_t; -#define GUEST_ASSERT(_condition) do { \ - if (!(_condition)) \ - GUEST_SYNC_ARGS(GUEST_PORT_ABORT, \ - "Failed guest assert: " \ - #_condition, __LINE__); \ - } while (0) +/* Common ucalls */ +enum { + UCALL_NONE, + UCALL_SYNC, + UCALL_ABORT, + UCALL_DONE, +}; -#define GUEST_SYNC(stage) GUEST_SYNC_ARGS(GUEST_PORT_SYNC, "hello", stage) +#define UCALL_MAX_ARGS 6 -#define GUEST_DONE() GUEST_SYNC_ARGS(GUEST_PORT_DONE, 0, 0) +struct ucall { + uint64_t cmd; + uint64_t args[UCALL_MAX_ARGS]; +}; -struct guest_args { - uint64_t arg0; - uint64_t arg1; - uint16_t port; -} __attribute__ ((packed)); +void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg); +void ucall_uninit(struct kvm_vm *vm); +void ucall(uint64_t cmd, int nargs, ...); +uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc); -void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id, - struct guest_args *args); +#define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage) +#define GUEST_DONE() ucall(UCALL_DONE, 0) +#define GUEST_ASSERT(_condition) do { \ + if (!(_condition)) \ + ucall(UCALL_ABORT, 2, \ + "Failed guest assert: " \ + #_condition, __LINE__); \ +} while (0) #endif /* SELFTEST_KVM_UTIL_H */ diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 6fd8c089cafc..4bd31bf04f53 100644 --- 
a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -133,6 +133,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) case VM_MODE_FLAT48PG: vm->page_size = 0x1000; vm->page_shift = 12; + vm->va_bits = 48; /* Limit to 48-bit canonical virtual addresses. */ vm->vpages_valid = sparsebit_alloc(); @@ -1669,17 +1670,3 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) { return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); } - -void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id, - struct guest_args *args) -{ - struct kvm_run *run = vcpu_state(vm, vcpu_id); - struct kvm_regs regs; - - memset(®s, 0, sizeof(regs)); - vcpu_regs_get(vm, vcpu_id, ®s); - - args->port = run->io.port; - args->arg0 = regs.rdi; - args->arg1 = regs.rsi; -} diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h index 542ed606b338..8fa7c9f7567a 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h +++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h @@ -47,6 +47,7 @@ struct kvm_vm { int fd; unsigned int page_size; unsigned int page_shift; + unsigned int va_bits; uint64_t max_gfn; struct vcpu *vcpu_head; struct userspace_mem_region *userspace_mem_region_head; diff --git a/tools/testing/selftests/kvm/lib/ucall.c b/tools/testing/selftests/kvm/lib/ucall.c new file mode 100644 index 000000000000..4777f9bb5194 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/ucall.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ucall support. A ucall is a "hypercall to userspace". + * + * Copyright (C) 2018, Red Hat, Inc. + */ +#include "kvm_util.h" +#include "kvm_util_internal.h" + +#define UCALL_PIO_PORT ((uint16_t)0x1000) + +static ucall_type_t ucall_type; +static vm_vaddr_t *ucall_exit_mmio_addr; + +static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa) +{ + if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1)) + return false; + + virt_pg_map(vm, gpa, gpa, 0); + + ucall_exit_mmio_addr = (vm_vaddr_t *)gpa; + sync_global_to_guest(vm, ucall_exit_mmio_addr); + + return true; +} + +void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg) +{ + ucall_type = type; + sync_global_to_guest(vm, ucall_type); + + if (type == UCALL_PIO) + return; + + if (type == UCALL_MMIO) { + vm_paddr_t gpa, start, end, step; + bool ret; + + if (arg) { + gpa = (vm_paddr_t)arg; + ret = ucall_mmio_init(vm, gpa); + TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa); + return; + } + + /* + * Find an address within the allowed virtual address space, + * that does _not_ have a KVM memory region associated with it. + * Identity mapping an address like this allows the guest to + * access it, but as KVM doesn't know what to do with it, it + * will assume it's something userspace handles and exit with + * KVM_EXIT_MMIO. Well, at least that's how it works for AArch64. + * Here we start with a guess that the addresses around two + * thirds of the VA space are unmapped and then work both down + * and up from there in 1/6 VA space sized steps. 
+ */ + start = 1ul << (vm->va_bits * 2 / 3); + end = 1ul << vm->va_bits; + step = 1ul << (vm->va_bits / 6); + for (gpa = start; gpa >= 0; gpa -= step) { + if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1))) + return; + } + for (gpa = start + step; gpa < end; gpa += step) { + if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1))) + return; + } + TEST_ASSERT(false, "Can't find a ucall mmio address"); + } +} + +void ucall_uninit(struct kvm_vm *vm) +{ + ucall_type = 0; + sync_global_to_guest(vm, ucall_type); + ucall_exit_mmio_addr = 0; + sync_global_to_guest(vm, ucall_exit_mmio_addr); +} + +static void ucall_pio_exit(struct ucall *uc) +{ +#ifdef __x86_64__ + asm volatile("in %[port], %%al" + : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax"); +#endif +} + +static void ucall_mmio_exit(struct ucall *uc) +{ + *ucall_exit_mmio_addr = (vm_vaddr_t)uc; +} + +void ucall(uint64_t cmd, int nargs, ...) +{ + struct ucall uc = { + .cmd = cmd, + }; + va_list va; + int i; + + nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS; + + va_start(va, nargs); + for (i = 0; i < nargs; ++i) + uc.args[i] = va_arg(va, uint64_t); + va_end(va); + + switch (ucall_type) { + case UCALL_PIO: + ucall_pio_exit(&uc); + break; + case UCALL_MMIO: + ucall_mmio_exit(&uc); + break; + }; +} + +uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) +{ + struct kvm_run *run = vcpu_state(vm, vcpu_id); + + memset(uc, 0, sizeof(*uc)); + +#ifdef __x86_64__ + if (ucall_type == UCALL_PIO && run->exit_reason == KVM_EXIT_IO && + run->io.port == UCALL_PIO_PORT) { + struct kvm_regs regs; + vcpu_regs_get(vm, vcpu_id, ®s); + memcpy(uc, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), sizeof(*uc)); + return uc->cmd; + } +#endif + if (ucall_type == UCALL_MMIO && run->exit_reason == KVM_EXIT_MMIO && + run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) { + vm_vaddr_t gva; + TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8, + "Unexpected ucall exit mmio address access"); + gva = *(vm_vaddr_t *)run->mmio.data; + memcpy(uc, addr_gva2hva(vm, gva), sizeof(*uc)); + } + + return uc->cmd; +} diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/platform_info_test.c index 3764e7121265..aa6a14331461 100644 --- a/tools/testing/selftests/kvm/platform_info_test.c +++ b/tools/testing/selftests/kvm/platform_info_test.c @@ -48,7 +48,7 @@ static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable) static void test_msr_platform_info_enabled(struct kvm_vm *vm) { struct kvm_run *run = vcpu_state(vm, VCPU_ID); - struct guest_args args; + struct ucall uc; set_msr_platform_info_enabled(vm, true); vcpu_run(vm, VCPU_ID); @@ -56,11 +56,11 @@ static void test_msr_platform_info_enabled(struct kvm_vm *vm) "Exit_reason other than KVM_EXIT_IO: %u (%s),\n", run->exit_reason, exit_reason_str(run->exit_reason)); - guest_args_read(vm, VCPU_ID, &args); - TEST_ASSERT(args.port == GUEST_PORT_SYNC, - "Received IO from port other than PORT_HOST_SYNC: %u\n", - run->io.port); - TEST_ASSERT((args.arg1 & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) == + get_ucall(vm, VCPU_ID, &uc); + TEST_ASSERT(uc.cmd == UCALL_SYNC, + "Received ucall other than UCALL_SYNC: %u\n", + ucall); + TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) == MSR_PLATFORM_INFO_MAX_TURBO_RATIO, "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.", MSR_PLATFORM_INFO_MAX_TURBO_RATIO); diff --git a/tools/testing/selftests/kvm/state_test.c b/tools/testing/selftests/kvm/state_test.c index 900e3e9dfb9f..cdf82735d6e7 100644 --- 
a/tools/testing/selftests/kvm/state_test.c +++ b/tools/testing/selftests/kvm/state_test.c @@ -127,6 +127,7 @@ int main(int argc, char *argv[]) struct kvm_vm *vm; struct kvm_run *run; struct kvm_x86_state *state; + struct ucall uc; int stage; struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); @@ -155,23 +156,23 @@ int main(int argc, char *argv[]) memset(®s1, 0, sizeof(regs1)); vcpu_regs_get(vm, VCPU_ID, ®s1); - switch (run->io.port) { - case GUEST_PORT_ABORT: - TEST_ASSERT(false, "%s at %s:%d", (const char *) regs1.rdi, - __FILE__, regs1.rsi); + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_ABORT: + TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0], + __FILE__, uc.args[1]); /* NOT REACHED */ - case GUEST_PORT_SYNC: + case UCALL_SYNC: break; - case GUEST_PORT_DONE: + case UCALL_DONE: goto done; default: - TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port); + TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd); } - /* PORT_SYNC is handled here. */ - TEST_ASSERT(!strcmp((const char *)regs1.rdi, "hello") && - regs1.rsi == stage, "Unexpected register values vmexit #%lx, got %lx", - stage, (ulong) regs1.rsi); + /* UCALL_SYNC is handled here. */ + TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") && + uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx", + stage, (ulong)uc.args[1]); state = vcpu_save_state(vm, VCPU_ID); kvm_vm_release(vm); diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c index 49bcc68b0235..8d487c78796a 100644 --- a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c +++ b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c @@ -146,26 +146,25 @@ int main(int argc, char *argv[]) for (;;) { volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); - struct guest_args args; + struct ucall uc; vcpu_run(vm, VCPU_ID); - guest_args_read(vm, VCPU_ID, &args); TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", run->exit_reason, exit_reason_str(run->exit_reason)); - switch (args.port) { - case GUEST_PORT_ABORT: - TEST_ASSERT(false, "%s", (const char *) args.arg0); + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_ABORT: + TEST_ASSERT(false, "%s", (const char *)uc.args[0]); /* NOT REACHED */ - case GUEST_PORT_SYNC: - report(args.arg1); + case UCALL_SYNC: + report(uc.args[1]); break; - case GUEST_PORT_DONE: + case UCALL_DONE: goto done; default: - TEST_ASSERT(false, "Unknown port 0x%x.", args.port); + TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd); } } From cc68765d418721ab854a03626c01e8eb82711922 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 18 Sep 2018 19:54:26 +0200 Subject: [PATCH 103/204] kvm: selftests: move arch-specific files to arch-specific locations Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/.gitignore | 13 ++++++----- tools/testing/selftests/kvm/Makefile | 22 +++++++++---------- .../testing/selftests/kvm/include/kvm_util.h | 2 +- .../testing/selftests/kvm/include/sparsebit.h | 6 ++--- .../testing/selftests/kvm/include/test_util.h | 6 ++--- .../kvm/include/{x86.h => x86_64/processor.h} | 8 +++---- .../selftests/kvm/include/{ => x86_64}/vmx.h | 6 ++--- tools/testing/selftests/kvm/lib/assert.c | 2 +- .../selftests/kvm/lib/kvm_util_internal.h | 8 +++---- .../kvm/lib/{x86.c => x86_64/processor.c} | 6 ++--- .../selftests/kvm/lib/{ => x86_64}/vmx.c | 4 ++-- .../kvm/{ => x86_64}/cr4_cpuid_sync_test.c | 2 +- .../kvm/{ => x86_64}/dirty_log_test.c | 0 .../kvm/{ => 
x86_64}/platform_info_test.c | 2 +- .../kvm/{ => x86_64}/set_sregs_test.c | 2 +- .../selftests/kvm/{ => x86_64}/state_test.c | 2 +- .../kvm/{ => x86_64}/sync_regs_test.c | 2 +- .../kvm/{ => x86_64}/vmx_tsc_adjust_test.c | 6 ++--- 18 files changed, 50 insertions(+), 49 deletions(-) rename tools/testing/selftests/kvm/include/{x86.h => x86_64/processor.h} (99%) rename tools/testing/selftests/kvm/include/{ => x86_64}/vmx.h (99%) rename tools/testing/selftests/kvm/lib/{x86.c => x86_64/processor.c} (99%) rename tools/testing/selftests/kvm/lib/{ => x86_64}/vmx.c (99%) rename tools/testing/selftests/kvm/{ => x86_64}/cr4_cpuid_sync_test.c (98%) rename tools/testing/selftests/kvm/{ => x86_64}/dirty_log_test.c (100%) rename tools/testing/selftests/kvm/{ => x86_64}/platform_info_test.c (99%) rename tools/testing/selftests/kvm/{ => x86_64}/set_sregs_test.c (98%) rename tools/testing/selftests/kvm/{ => x86_64}/state_test.c (99%) rename tools/testing/selftests/kvm/{ => x86_64}/sync_regs_test.c (99%) rename tools/testing/selftests/kvm/{ => x86_64}/vmx_tsc_adjust_test.c (98%) diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 5c34752e1cff..e0aa5411cf9b 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -1,6 +1,7 @@ -cr4_cpuid_sync_test -platform_info_test -set_sregs_test -sync_regs_test -vmx_tsc_adjust_test -state_test +/x86_64/cr4_cpuid_sync_test +/x86_64/platform_info_test +/x86_64/set_sregs_test +/x86_64/sync_regs_test +/x86_64/vmx_tsc_adjust_test +/x86_64/state_test +/x86_64/dirty_log_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 08426f036353..734a60da9776 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -1,26 +1,26 @@ all: -top_srcdir = ../../../../ +top_srcdir = ../../../.. 
UNAME_M := $(shell uname -m) LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c -LIBKVM_x86_64 = lib/x86.c lib/vmx.c +LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c -TEST_GEN_PROGS_x86_64 = platform_info_test -TEST_GEN_PROGS_x86_64 += set_sregs_test -TEST_GEN_PROGS_x86_64 += sync_regs_test -TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test -TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test -TEST_GEN_PROGS_x86_64 += state_test -TEST_GEN_PROGS_x86_64 += dirty_log_test +TEST_GEN_PROGS_x86_64 = x86_64/platform_info_test +TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test +TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test +TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test +TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test +TEST_GEN_PROGS_x86_64 += x86_64/state_test +TEST_GEN_PROGS_x86_64 += x86_64/dirty_log_test TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) LIBKVM += $(LIBKVM_$(UNAME_M)) INSTALL_HDR_PATH = $(top_srcdir)/usr LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ -LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include -CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$( #include @@ -72,4 +72,4 @@ void sparsebit_validate_internal(struct sparsebit *sbit); } #endif -#endif /* _TEST_SPARSEBIT_H_ */ +#endif /* SELFTEST_KVM_SPARSEBIT_H */ diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index 73c3933436ec..c7dafe8bd02c 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -7,8 +7,8 @@ * */ -#ifndef TEST_UTIL_H -#define TEST_UTIL_H 1 +#ifndef SELFTEST_KVM_TEST_UTIL_H +#define SELFTEST_KVM_TEST_UTIL_H #include #include @@ -41,4 +41,4 @@ void test_assert(bool exp, const char *exp_str, #a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \ } while (0) -#endif /* TEST_UTIL_H */ +#endif /* SELFTEST_KVM_TEST_UTIL_H */ diff --git a/tools/testing/selftests/kvm/include/x86.h b/tools/testing/selftests/kvm/include/x86_64/processor.h similarity index 99% rename from tools/testing/selftests/kvm/include/x86.h rename to tools/testing/selftests/kvm/include/x86_64/processor.h index 42c3596815b8..bb76e8db9b2b 100644 --- a/tools/testing/selftests/kvm/include/x86.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -1,5 +1,5 @@ /* - * tools/testing/selftests/kvm/include/x86.h + * tools/testing/selftests/kvm/include/x86_64/processor.h * * Copyright (C) 2018, Google LLC. * @@ -7,8 +7,8 @@ * */ -#ifndef SELFTEST_KVM_X86_H -#define SELFTEST_KVM_X86_H +#ifndef SELFTEST_KVM_PROCESSOR_H +#define SELFTEST_KVM_PROCESSOR_H #include #include @@ -1044,4 +1044,4 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s #define MSR_VM_IGNNE 0xc0010115 #define MSR_VM_HSAVE_PA 0xc0010117 -#endif /* !SELFTEST_KVM_X86_H */ +#endif /* SELFTEST_KVM_PROCESSOR_H */ diff --git a/tools/testing/selftests/kvm/include/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h similarity index 99% rename from tools/testing/selftests/kvm/include/vmx.h rename to tools/testing/selftests/kvm/include/x86_64/vmx.h index b9ffe1024d3a..12ebd836f7ef 100644 --- a/tools/testing/selftests/kvm/include/vmx.h +++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h @@ -1,5 +1,5 @@ /* - * tools/testing/selftests/kvm/include/vmx.h + * tools/testing/selftests/kvm/include/x86_64/vmx.h * * Copyright (C) 2018, Google LLC. 
* @@ -11,7 +11,7 @@ #define SELFTEST_KVM_VMX_H #include -#include "x86.h" +#include "processor.h" #define CPUID_VMX_BIT 5 @@ -549,4 +549,4 @@ struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva); bool prepare_for_vmx_operation(struct vmx_pages *vmx); void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp); -#endif /* !SELFTEST_KVM_VMX_H */ +#endif /* SELFTEST_KVM_VMX_H */ diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c index cd01144d27c8..6398efe67885 100644 --- a/tools/testing/selftests/kvm/lib/assert.c +++ b/tools/testing/selftests/kvm/lib/assert.c @@ -13,7 +13,7 @@ #include #include -#include "../../kselftest.h" +#include "kselftest.h" /* Dumps the current stack trace to stderr. */ static void __attribute__((noinline)) test_dump_stack(void); diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h index 8fa7c9f7567a..36367be0fe26 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h +++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h @@ -1,13 +1,13 @@ /* - * tools/testing/selftests/kvm/lib/kvm_util.c + * tools/testing/selftests/kvm/lib/kvm_util_internal.h * * Copyright (C) 2018, Google LLC. * * This work is licensed under the terms of the GNU GPL, version 2. */ -#ifndef KVM_UTIL_INTERNAL_H -#define KVM_UTIL_INTERNAL_H 1 +#ifndef SELFTEST_KVM_UTIL_INTERNAL_H +#define SELFTEST_KVM_UTIL_INTERNAL_H #include "sparsebit.h" @@ -70,4 +70,4 @@ void regs_dump(FILE *stream, struct kvm_regs *regs, void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent); -#endif +#endif /* SELFTEST_KVM_UTIL_INTERNAL_H */ diff --git a/tools/testing/selftests/kvm/lib/x86.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c similarity index 99% rename from tools/testing/selftests/kvm/lib/x86.c rename to tools/testing/selftests/kvm/lib/x86_64/processor.c index bb6c65ebfa77..79c2c5c203c0 100644 --- a/tools/testing/selftests/kvm/lib/x86.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1,5 +1,5 @@ /* - * tools/testing/selftests/kvm/lib/x86.c + * tools/testing/selftests/kvm/lib/x86_64/processor.c * * Copyright (C) 2018, Google LLC. * @@ -10,8 +10,8 @@ #include "test_util.h" #include "kvm_util.h" -#include "kvm_util_internal.h" -#include "x86.h" +#include "../kvm_util_internal.h" +#include "processor.h" /* Minimum physical address used for virtual translation tables. */ #define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000 diff --git a/tools/testing/selftests/kvm/lib/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c similarity index 99% rename from tools/testing/selftests/kvm/lib/vmx.c rename to tools/testing/selftests/kvm/lib/x86_64/vmx.c index b987c3c970eb..d7c401472247 100644 --- a/tools/testing/selftests/kvm/lib/vmx.c +++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c @@ -1,5 +1,5 @@ /* - * tools/testing/selftests/kvm/lib/x86.c + * tools/testing/selftests/kvm/lib/x86_64/vmx.c * * Copyright (C) 2018, Google LLC. * @@ -10,7 +10,7 @@ #include "test_util.h" #include "kvm_util.h" -#include "x86.h" +#include "processor.h" #include "vmx.h" /* Allocate memory regions for nested VMX tests. 
diff --git a/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c similarity index 98% rename from tools/testing/selftests/kvm/cr4_cpuid_sync_test.c rename to tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c index fd4f419fe9ab..d503a51fad30 100644 --- a/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c +++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c @@ -17,7 +17,7 @@ #include "test_util.h" #include "kvm_util.h" -#include "x86.h" +#include "processor.h" #define X86_FEATURE_XSAVE (1<<26) #define X86_FEATURE_OSXSAVE (1<<27) diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/dirty_log_test.c similarity index 100% rename from tools/testing/selftests/kvm/dirty_log_test.c rename to tools/testing/selftests/kvm/x86_64/dirty_log_test.c diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c similarity index 99% rename from tools/testing/selftests/kvm/platform_info_test.c rename to tools/testing/selftests/kvm/x86_64/platform_info_test.c index aa6a14331461..eb3e7a838cb4 100644 --- a/tools/testing/selftests/kvm/platform_info_test.c +++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c @@ -19,7 +19,7 @@ #include "test_util.h" #include "kvm_util.h" -#include "x86.h" +#include "processor.h" #define VCPU_ID 0 #define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00 diff --git a/tools/testing/selftests/kvm/set_sregs_test.c b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c similarity index 98% rename from tools/testing/selftests/kvm/set_sregs_test.c rename to tools/testing/selftests/kvm/x86_64/set_sregs_test.c index 881419d5746e..35640e8e95bc 100644 --- a/tools/testing/selftests/kvm/set_sregs_test.c +++ b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c @@ -22,7 +22,7 @@ #include "test_util.h" #include "kvm_util.h" -#include "x86.h" +#include "processor.h" #define VCPU_ID 5 diff --git a/tools/testing/selftests/kvm/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c similarity index 99% rename from tools/testing/selftests/kvm/state_test.c rename to tools/testing/selftests/kvm/x86_64/state_test.c index cdf82735d6e7..43df194a7c1e 100644 --- a/tools/testing/selftests/kvm/state_test.c +++ b/tools/testing/selftests/kvm/x86_64/state_test.c @@ -17,7 +17,7 @@ #include "test_util.h" #include "kvm_util.h" -#include "x86.h" +#include "processor.h" #include "vmx.h" #define VCPU_ID 5 diff --git a/tools/testing/selftests/kvm/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c similarity index 99% rename from tools/testing/selftests/kvm/sync_regs_test.c rename to tools/testing/selftests/kvm/x86_64/sync_regs_test.c index 213343e5dff9..c8478ce9ea77 100644 --- a/tools/testing/selftests/kvm/sync_regs_test.c +++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c @@ -19,7 +19,7 @@ #include "test_util.h" #include "kvm_util.h" -#include "x86.h" +#include "processor.h" #define VCPU_ID 5 diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c similarity index 98% rename from tools/testing/selftests/kvm/vmx_tsc_adjust_test.c rename to tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c index 8d487c78796a..38a91a5f04ac 100644 --- a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c @@ -1,5 +1,5 @@ /* - * gtests/tests/vmx_tsc_adjust_test.c + * vmx_tsc_adjust_test * * Copyright (C) 
2018, Google LLC. * @@ -22,13 +22,13 @@ #include "test_util.h" #include "kvm_util.h" -#include "x86.h" +#include "processor.h" #include "vmx.h" #include #include -#include "../kselftest.h" +#include "kselftest.h" #ifndef MSR_IA32_TSC_ADJUST #define MSR_IA32_TSC_ADJUST 0x3b From eea192bfd96db157151d27dd9c5e3e6a50c5d6be Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 18 Sep 2018 19:54:27 +0200 Subject: [PATCH 104/204] kvm: selftests: add cscope make target Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 734a60da9776..959be146f789 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -29,7 +29,7 @@ include ../lib.mk STATIC_LIBS := $(OUTPUT)/libkvm.a LIBKVM_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM)) -EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS) +EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS) cscope.* x := $(shell mkdir -p $(sort $(dir $(LIBKVM_OBJ)))) $(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c @@ -41,3 +41,12 @@ $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ) all: $(STATIC_LIBS) $(TEST_GEN_PROGS): $(STATIC_LIBS) $(STATIC_LIBS):| khdr + +cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib .. +cscope: + $(RM) cscope.* + (find $(include_paths) -name '*.h' \ + -exec realpath --relative-base=$(PWD) {} \;; \ + find . -name '*.c' \ + -exec realpath --relative-base=$(PWD) {} \;) | sort -u > cscope.files + cscope -b From eabe7881d264fd8ee074636fe50c8a8af08fedfd Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 18 Sep 2018 19:54:28 +0200 Subject: [PATCH 105/204] kvm: selftests: tidy up kvm_util Tidy up kvm-util code: code/comment formatting, remove unused code, and move x86 specific code out. We also move vcpu_dump() out of common code, because not all arches (AArch64) have KVM_GET_REGS. Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- .../testing/selftests/kvm/include/kvm_util.h | 80 ++-- .../selftests/kvm/include/x86_64/processor.h | 20 +- tools/testing/selftests/kvm/lib/kvm_util.c | 438 +++++------------- .../selftests/kvm/lib/kvm_util_internal.h | 22 +- .../selftests/kvm/lib/x86_64/processor.c | 245 ++++++++++ .../selftests/kvm/x86_64/dirty_log_test.c | 1 + 6 files changed, 407 insertions(+), 399 deletions(-) diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 7830c46f27d8..4d5e405ff246 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -17,12 +17,6 @@ #include "sparsebit.h" -/* - * Memslots can't cover the gfn starting at this gpa otherwise vCPUs can't be - * created. Only applies to VMs using EPT. - */ -#define KVM_DEFAULT_IDENTITY_MAP_ADDRESS 0xfffbc000ul - /* Callers of kvm_util only have an incomplete/opaque description of the * structure kvm_util is using to maintain the state of a VM. 
@@ -33,11 +27,11 @@ typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */ /* Minimum allocated guest virtual and physical addresses */ -#define KVM_UTIL_MIN_VADDR 0x2000 +#define KVM_UTIL_MIN_VADDR 0x2000 #define DEFAULT_GUEST_PHY_PAGES 512 #define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000 -#define DEFAULT_STACK_PGS 5 +#define DEFAULT_STACK_PGS 5 enum vm_guest_mode { VM_MODE_FLAT48PG, @@ -58,15 +52,15 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm); void kvm_vm_release(struct kvm_vm *vmp); void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log); -int kvm_memcmp_hva_gva(void *hva, - struct kvm_vm *vm, const vm_vaddr_t gva, size_t len); +int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva, + size_t len); void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename, - uint32_t data_memslot, uint32_t pgd_memslot); + uint32_t data_memslot, uint32_t pgd_memslot); void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); -void vcpu_dump(FILE *stream, struct kvm_vm *vm, - uint32_t vcpuid, uint8_t indent); +void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, + uint8_t indent); void vm_create_irqchip(struct kvm_vm *vm); @@ -75,13 +69,14 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, uint64_t guest_paddr, uint32_t slot, uint64_t npages, uint32_t flags); -void vcpu_ioctl(struct kvm_vm *vm, - uint32_t vcpuid, unsigned long ioctl, void *arg); +void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl, + void *arg); void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg); void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags); -void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_memslot); +void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, + int gdt_memslot); vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, - uint32_t data_memslot, uint32_t pgd_memslot); + uint32_t data_memslot, uint32_t pgd_memslot); void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, size_t size, uint32_t pgd_memslot); void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa); @@ -93,56 +88,33 @@ struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid); void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_mp_state *mp_state); -void vcpu_regs_get(struct kvm_vm *vm, - uint32_t vcpuid, struct kvm_regs *regs); -void vcpu_regs_set(struct kvm_vm *vm, - uint32_t vcpuid, struct kvm_regs *regs); + struct kvm_mp_state *mp_state); +void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); +void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...); -void vcpu_sregs_get(struct kvm_vm *vm, - uint32_t vcpuid, struct kvm_sregs *sregs); -void vcpu_sregs_set(struct kvm_vm *vm, - uint32_t vcpuid, struct kvm_sregs *sregs); -int _vcpu_sregs_set(struct kvm_vm *vm, - uint32_t vcpuid, struct kvm_sregs *sregs); +void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_sregs *sregs); +void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_sregs *sregs); +int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_sregs *sregs); void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, - struct 
kvm_vcpu_events *events); + struct kvm_vcpu_events *events); void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_vcpu_events *events); -uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index); -void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, - uint64_t msr_value); + struct kvm_vcpu_events *events); const char *exit_reason_str(unsigned int exit_reason); void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot); void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, - uint32_t pgd_memslot); -vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, - vm_paddr_t paddr_min, uint32_t memslot); - -struct kvm_cpuid2 *kvm_get_supported_cpuid(void); -void vcpu_set_cpuid( - struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid); - -struct kvm_cpuid_entry2 * -kvm_get_supported_cpuid_index(uint32_t function, uint32_t index); - -static inline struct kvm_cpuid_entry2 * -kvm_get_supported_cpuid_entry(uint32_t function) -{ - return kvm_get_supported_cpuid_index(function, 0); -} + uint32_t pgd_memslot); +vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, + uint32_t memslot); struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size, void *guest_code); void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); -typedef void (*vmx_guest_code_t)(vm_vaddr_t vmxon_vaddr, - vm_paddr_t vmxon_paddr, - vm_vaddr_t vmcs_vaddr, - vm_paddr_t vmcs_paddr); - struct kvm_userspace_memory_region * kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end); diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index bb76e8db9b2b..e2884c2b81ff 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -305,7 +305,25 @@ static inline unsigned long get_xmm(int n) struct kvm_x86_state; struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid); -void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state); +void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_x86_state *state); + +struct kvm_cpuid2 *kvm_get_supported_cpuid(void); +void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_cpuid2 *cpuid); + +struct kvm_cpuid_entry2 * +kvm_get_supported_cpuid_index(uint32_t function, uint32_t index); + +static inline struct kvm_cpuid_entry2 * +kvm_get_supported_cpuid_entry(uint32_t function) +{ + return kvm_get_supported_cpuid_index(function, 0); +} + +uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index); +void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, + uint64_t msr_value); /* * Basic CPU control in CR0 diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 4bd31bf04f53..1a8f8f9e3635 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -16,8 +16,6 @@ #include #include -#define KVM_DEV_PATH "/dev/kvm" - #define KVM_UTIL_PGS_PER_HUGEPG 512 #define KVM_UTIL_MIN_PADDR 0x2000 @@ -30,7 +28,8 @@ static void *align(void *x, size_t size) return (void *) (((size_t) x + mask) & ~mask); } -/* Capability +/* + * Capability * * Input Args: * cap - Capability @@ -92,13 +91,13 @@ static void vm_open(struct kvm_vm *vm, int perm) if (vm->kvm_fd < 0) exit(KSFT_SKIP); - /* Create VM. 
*/ vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, NULL); TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, " "rc: %i errno: %i", vm->fd, errno); } -/* VM Create +/* + * VM Create * * Input Args: * mode - VM Mode (e.g. VM_MODE_FLAT48PG) @@ -121,7 +120,6 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) struct kvm_vm *vm; int kvm_fd; - /* Allocate memory. */ vm = calloc(1, sizeof(*vm)); TEST_ASSERT(vm != NULL, "Insufficent Memory"); @@ -160,7 +158,8 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) return vm; } -/* VM Restart +/* + * VM Restart * * Input Args: * vm - VM that has been released before @@ -187,7 +186,8 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm) " rc: %i errno: %i\n" " slot: %u flags: 0x%x\n" " guest_phys_addr: 0x%lx size: 0x%lx", - ret, errno, region->region.slot, region->region.flags, + ret, errno, region->region.slot, + region->region.flags, region->region.guest_phys_addr, region->region.memory_size); } @@ -203,7 +203,8 @@ void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) strerror(-ret)); } -/* Userspace Memory Region Find +/* + * Userspace Memory Region Find * * Input Args: * vm - Virtual Machine @@ -221,8 +222,8 @@ void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) * of the regions is returned. Null is returned only when no overlapping * region exists. */ -static struct userspace_mem_region *userspace_mem_region_find( - struct kvm_vm *vm, uint64_t start, uint64_t end) +static struct userspace_mem_region * +userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) { struct userspace_mem_region *region; @@ -238,7 +239,8 @@ static struct userspace_mem_region *userspace_mem_region_find( return NULL; } -/* KVM Userspace Memory Region Find +/* + * KVM Userspace Memory Region Find * * Input Args: * vm - Virtual Machine @@ -266,7 +268,8 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, return ®ion->region; } -/* VCPU Find +/* + * VCPU Find * * Input Args: * vm - Virtual Machine @@ -281,8 +284,7 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, * returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU * for the specified vcpuid. */ -struct vcpu *vcpu_find(struct kvm_vm *vm, - uint32_t vcpuid) +struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid) { struct vcpu *vcpup; @@ -294,7 +296,8 @@ struct vcpu *vcpu_find(struct kvm_vm *vm, return NULL; } -/* VM VCPU Remove +/* + * VM VCPU Remove * * Input Args: * vm - Virtual Machine @@ -331,11 +334,9 @@ void kvm_vm_release(struct kvm_vm *vmp) { int ret; - /* Free VCPUs. */ while (vmp->vcpu_head) vm_vcpu_rm(vmp, vmp->vcpu_head->id); - /* Close file descriptor for the VM. */ ret = close(vmp->fd); TEST_ASSERT(ret == 0, "Close of vm fd failed,\n" " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno); @@ -345,7 +346,8 @@ void kvm_vm_release(struct kvm_vm *vmp) " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno); } -/* Destroys and frees the VM pointed to by vmp. +/* + * Destroys and frees the VM pointed to by vmp. */ void kvm_vm_free(struct kvm_vm *vmp) { @@ -384,7 +386,8 @@ void kvm_vm_free(struct kvm_vm *vmp) free(vmp); } -/* Memory Compare, host virtual to guest virtual +/* + * Memory Compare, host virtual to guest virtual * * Input Args: * hva - Starting host virtual address @@ -406,23 +409,25 @@ void kvm_vm_free(struct kvm_vm *vmp) * a length of len, to the guest bytes starting at the guest virtual * address given by gva. 
*/ -int kvm_memcmp_hva_gva(void *hva, - struct kvm_vm *vm, vm_vaddr_t gva, size_t len) +int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len) { size_t amt; - /* Compare a batch of bytes until either a match is found + /* + * Compare a batch of bytes until either a match is found * or all the bytes have been compared. */ for (uintptr_t offset = 0; offset < len; offset += amt) { uintptr_t ptr1 = (uintptr_t)hva + offset; - /* Determine host address for guest virtual address + /* + * Determine host address for guest virtual address * at offset. */ uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset); - /* Determine amount to compare on this pass. + /* + * Determine amount to compare on this pass. * Don't allow the comparsion to cross a page boundary. */ amt = len - offset; @@ -434,7 +439,8 @@ int kvm_memcmp_hva_gva(void *hva, assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift)); assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift)); - /* Perform the comparison. If there is a difference + /* + * Perform the comparison. If there is a difference * return that result to the caller, otherwise need * to continue on looking for a mismatch. */ @@ -443,109 +449,15 @@ int kvm_memcmp_hva_gva(void *hva, return ret; } - /* No mismatch found. Let the caller know the two memory + /* + * No mismatch found. Let the caller know the two memory * areas are equal. */ return 0; } -/* Allocate an instance of struct kvm_cpuid2 - * - * Input Args: None - * - * Output Args: None - * - * Return: A pointer to the allocated struct. The caller is responsible - * for freeing this struct. - * - * Since kvm_cpuid2 uses a 0-length array to allow a the size of the - * array to be decided at allocation time, allocation is slightly - * complicated. This function uses a reasonable default length for - * the array and performs the appropriate allocation. - */ -static struct kvm_cpuid2 *allocate_kvm_cpuid2(void) -{ - struct kvm_cpuid2 *cpuid; - int nent = 100; - size_t size; - - size = sizeof(*cpuid); - size += nent * sizeof(struct kvm_cpuid_entry2); - cpuid = malloc(size); - if (!cpuid) { - perror("malloc"); - abort(); - } - - cpuid->nent = nent; - - return cpuid; -} - -/* KVM Supported CPUID Get - * - * Input Args: None - * - * Output Args: - * - * Return: The supported KVM CPUID - * - * Get the guest CPUID supported by KVM. - */ -struct kvm_cpuid2 *kvm_get_supported_cpuid(void) -{ - static struct kvm_cpuid2 *cpuid; - int ret; - int kvm_fd; - - if (cpuid) - return cpuid; - - cpuid = allocate_kvm_cpuid2(); - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); - - ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid); - TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n", - ret, errno); - - close(kvm_fd); - return cpuid; -} - -/* Locate a cpuid entry. - * - * Input Args: - * cpuid: The cpuid. - * function: The function of the cpuid entry to find. - * - * Output Args: None - * - * Return: A pointer to the cpuid entry. Never returns NULL. 
- */ -struct kvm_cpuid_entry2 * -kvm_get_supported_cpuid_index(uint32_t function, uint32_t index) -{ - struct kvm_cpuid2 *cpuid; - struct kvm_cpuid_entry2 *entry = NULL; - int i; - - cpuid = kvm_get_supported_cpuid(); - for (i = 0; i < cpuid->nent; i++) { - if (cpuid->entries[i].function == function && - cpuid->entries[i].index == index) { - entry = &cpuid->entries[i]; - break; - } - } - - TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).", - function, index); - return entry; -} - -/* VM Userspace Memory Region Add +/* + * VM Userspace Memory Region Add * * Input Args: * vm - Virtual Machine @@ -587,7 +499,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, " vm->max_gfn: 0x%lx vm->page_size: 0x%x", guest_paddr, npages, vm->max_gfn, vm->page_size); - /* Confirm a mem region with an overlapping address doesn't + /* + * Confirm a mem region with an overlapping address doesn't * already exist. */ region = (struct userspace_mem_region *) userspace_mem_region_find( @@ -678,7 +591,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, vm->userspace_mem_region_head = region; } -/* Memslot to region +/* + * Memslot to region * * Input Args: * vm - Virtual Machine @@ -692,8 +606,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, * on error (e.g. currently no memory region using memslot as a KVM * memory slot ID). */ -static struct userspace_mem_region *memslot2region(struct kvm_vm *vm, - uint32_t memslot) +static struct userspace_mem_region * +memslot2region(struct kvm_vm *vm, uint32_t memslot) { struct userspace_mem_region *region; @@ -713,7 +627,8 @@ static struct userspace_mem_region *memslot2region(struct kvm_vm *vm, return region; } -/* VM Memory Region Flags Set +/* + * VM Memory Region Flags Set * * Input Args: * vm - Virtual Machine @@ -731,7 +646,6 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) int ret; struct userspace_mem_region *region; - /* Locate memory region. */ region = memslot2region(vm, slot); region->region.flags = flags; @@ -743,7 +657,8 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) ret, errno, slot, flags); } -/* VCPU mmap Size +/* + * VCPU mmap Size * * Input Args: None * @@ -773,7 +688,8 @@ static int vcpu_mmap_sz(void) return ret; } -/* VM VCPU Add +/* + * VM VCPU Add * * Input Args: * vm - Virtual Machine @@ -786,7 +702,8 @@ static int vcpu_mmap_sz(void) * Creates and adds to the VM specified by vm and virtual CPU with * the ID given by vcpuid. */ -void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_memslot) +void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, + int gdt_memslot) { struct vcpu *vcpu; @@ -824,7 +741,8 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_me vcpu_setup(vm, vcpuid, pgd_memslot, gdt_memslot); } -/* VM Virtual Address Unused Gap +/* + * VM Virtual Address Unused Gap * * Input Args: * vm - Virtual Machine @@ -844,14 +762,14 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_me * sz unallocated bytes >= vaddr_min is available. */ static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, - vm_vaddr_t vaddr_min) + vm_vaddr_t vaddr_min) { uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; /* Determine lowest permitted virtual page index. 
*/ uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; if ((pgidx_start * vm->page_size) < vaddr_min) - goto no_va_found; + goto no_va_found; /* Loop over section with enough valid virtual page indexes. */ if (!sparsebit_is_set_num(vm->vpages_valid, @@ -910,7 +828,8 @@ static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, return pgidx_start * vm->page_size; } -/* VM Virtual Address Allocate +/* + * VM Virtual Address Allocate * * Input Args: * vm - Virtual Machine @@ -931,13 +850,14 @@ static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, * a page. */ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, - uint32_t data_memslot, uint32_t pgd_memslot) + uint32_t data_memslot, uint32_t pgd_memslot) { uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); virt_pgd_alloc(vm, pgd_memslot); - /* Find an unused range of virtual page addresses of at least + /* + * Find an unused range of virtual page addresses of at least * pages in length. */ vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); @@ -991,7 +911,8 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, } } -/* Address VM Physical to Host Virtual +/* + * Address VM Physical to Host Virtual * * Input Args: * vm - Virtual Machine @@ -1023,7 +944,8 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) return NULL; } -/* Address Host Virtual to VM Physical +/* + * Address Host Virtual to VM Physical * * Input Args: * vm - Virtual Machine @@ -1057,7 +979,8 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) return -1; } -/* VM Create IRQ Chip +/* + * VM Create IRQ Chip * * Input Args: * vm - Virtual Machine @@ -1079,7 +1002,8 @@ void vm_create_irqchip(struct kvm_vm *vm) vm->has_irqchip = true; } -/* VM VCPU State +/* + * VM VCPU State * * Input Args: * vm - Virtual Machine @@ -1101,7 +1025,8 @@ struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid) return vcpu->state; } -/* VM VCPU Run +/* + * VM VCPU Run * * Input Args: * vm - Virtual Machine @@ -1127,13 +1052,14 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) int rc; TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); - do { + do { rc = ioctl(vcpu->fd, KVM_RUN, NULL); } while (rc == -1 && errno == EINTR); return rc; } -/* VM VCPU Set MP State +/* + * VM VCPU Set MP State * * Input Args: * vm - Virtual Machine @@ -1148,7 +1074,7 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) * by mp_state. */ void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_mp_state *mp_state) + struct kvm_mp_state *mp_state) { struct vcpu *vcpu = vcpu_find(vm, vcpuid); int ret; @@ -1160,7 +1086,8 @@ void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, "rc: %i errno: %i", ret, errno); } -/* VM VCPU Regs Get +/* + * VM VCPU Regs Get * * Input Args: * vm - Virtual Machine @@ -1174,21 +1101,20 @@ void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, * Obtains the current register state for the VCPU specified by vcpuid * and stores it at the location given by regs. */ -void vcpu_regs_get(struct kvm_vm *vm, - uint32_t vcpuid, struct kvm_regs *regs) +void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs) { struct vcpu *vcpu = vcpu_find(vm, vcpuid); int ret; TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); - /* Get the regs. 
*/ ret = ioctl(vcpu->fd, KVM_GET_REGS, regs); TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i", ret, errno); } -/* VM VCPU Regs Set +/* + * VM VCPU Regs Set * * Input Args: * vm - Virtual Machine @@ -1202,165 +1128,46 @@ void vcpu_regs_get(struct kvm_vm *vm, * Sets the regs of the VCPU specified by vcpuid to the values * given by regs. */ -void vcpu_regs_set(struct kvm_vm *vm, - uint32_t vcpuid, struct kvm_regs *regs) +void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs) { struct vcpu *vcpu = vcpu_find(vm, vcpuid); int ret; TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); - /* Set the regs. */ ret = ioctl(vcpu->fd, KVM_SET_REGS, regs); TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i", ret, errno); } void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_vcpu_events *events) + struct kvm_vcpu_events *events) { struct vcpu *vcpu = vcpu_find(vm, vcpuid); int ret; TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); - /* Get the regs. */ ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events); TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS, failed, rc: %i errno: %i", ret, errno); } void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_vcpu_events *events) + struct kvm_vcpu_events *events) { struct vcpu *vcpu = vcpu_find(vm, vcpuid); int ret; TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); - /* Set the regs. */ ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events); TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS, failed, rc: %i errno: %i", ret, errno); } -/* VCPU Get MSR - * - * Input Args: - * vm - Virtual Machine - * vcpuid - VCPU ID - * msr_index - Index of MSR - * - * Output Args: None - * - * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced. - * - * Get value of MSR for VCPU. - */ -uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index) -{ - struct vcpu *vcpu = vcpu_find(vm, vcpuid); - struct { - struct kvm_msrs header; - struct kvm_msr_entry entry; - } buffer = {}; - int r; - - TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); - buffer.header.nmsrs = 1; - buffer.entry.index = msr_index; - r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header); - TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n" - " rc: %i errno: %i", r, errno); - - return buffer.entry.data; -} - -/* VCPU Set MSR - * - * Input Args: - * vm - Virtual Machine - * vcpuid - VCPU ID - * msr_index - Index of MSR - * msr_value - New value of MSR - * - * Output Args: None - * - * Return: On success, nothing. On failure a TEST_ASSERT is produced. - * - * Set value of MSR for VCPU. - */ -void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, - uint64_t msr_value) -{ - struct vcpu *vcpu = vcpu_find(vm, vcpuid); - struct { - struct kvm_msrs header; - struct kvm_msr_entry entry; - } buffer = {}; - int r; - - TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); - memset(&buffer, 0, sizeof(buffer)); - buffer.header.nmsrs = 1; - buffer.entry.index = msr_index; - buffer.entry.data = msr_value; - r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header); - TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n" - " rc: %i errno: %i", r, errno); -} - -/* VM VCPU Args Set - * - * Input Args: - * vm - Virtual Machine - * vcpuid - VCPU ID - * num - number of arguments - * ... - arguments, each of type uint64_t - * - * Output Args: None - * - * Return: None - * - * Sets the first num function input arguments to the values - * given as variable args. 
Each of the variable args is expected to - * be of type uint64_t. - */ -void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...) -{ - va_list ap; - struct kvm_regs regs; - - TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n" - " num: %u\n", - num); - - va_start(ap, num); - vcpu_regs_get(vm, vcpuid, ®s); - - if (num >= 1) - regs.rdi = va_arg(ap, uint64_t); - - if (num >= 2) - regs.rsi = va_arg(ap, uint64_t); - - if (num >= 3) - regs.rdx = va_arg(ap, uint64_t); - - if (num >= 4) - regs.rcx = va_arg(ap, uint64_t); - - if (num >= 5) - regs.r8 = va_arg(ap, uint64_t); - - if (num >= 6) - regs.r9 = va_arg(ap, uint64_t); - - vcpu_regs_set(vm, vcpuid, ®s); - va_end(ap); -} - -/* VM VCPU System Regs Get +/* + * VM VCPU System Regs Get * * Input Args: * vm - Virtual Machine @@ -1374,22 +1181,20 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...) * Obtains the current system register state for the VCPU specified by * vcpuid and stores it at the location given by sregs. */ -void vcpu_sregs_get(struct kvm_vm *vm, - uint32_t vcpuid, struct kvm_sregs *sregs) +void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) { struct vcpu *vcpu = vcpu_find(vm, vcpuid); int ret; TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); - /* Get the regs. */ - /* Get the regs. */ ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs); TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i", ret, errno); } -/* VM VCPU System Regs Set +/* + * VM VCPU System Regs Set * * Input Args: * vm - Virtual Machine @@ -1403,27 +1208,25 @@ void vcpu_sregs_get(struct kvm_vm *vm, * Sets the system regs of the VCPU specified by vcpuid to the values * given by sregs. */ -void vcpu_sregs_set(struct kvm_vm *vm, - uint32_t vcpuid, struct kvm_sregs *sregs) +void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) { int ret = _vcpu_sregs_set(vm, vcpuid, sregs); TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, " "rc: %i errno: %i", ret, errno); } -int _vcpu_sregs_set(struct kvm_vm *vm, - uint32_t vcpuid, struct kvm_sregs *sregs) +int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) { struct vcpu *vcpu = vcpu_find(vm, vcpuid); int ret; TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); - /* Get the regs. */ return ioctl(vcpu->fd, KVM_SET_SREGS, sregs); } -/* VCPU Ioctl +/* + * VCPU Ioctl * * Input Args: * vm - Virtual Machine @@ -1435,8 +1238,8 @@ int _vcpu_sregs_set(struct kvm_vm *vm, * * Issues an arbitrary ioctl on a VCPU fd. 
*/ -void vcpu_ioctl(struct kvm_vm *vm, - uint32_t vcpuid, unsigned long cmd, void *arg) +void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, + unsigned long cmd, void *arg) { struct vcpu *vcpu = vcpu_find(vm, vcpuid); int ret; @@ -1448,7 +1251,8 @@ void vcpu_ioctl(struct kvm_vm *vm, cmd, ret, errno, strerror(errno)); } -/* VM Ioctl +/* + * VM Ioctl * * Input Args: * vm - Virtual Machine @@ -1468,7 +1272,8 @@ void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg) cmd, ret, errno, strerror(errno)); } -/* VM Dump +/* + * VM Dump * * Input Args: * vm - Virtual Machine @@ -1515,38 +1320,6 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) vcpu_dump(stream, vm, vcpu->id, indent + 2); } -/* VM VCPU Dump - * - * Input Args: - * vm - Virtual Machine - * vcpuid - VCPU ID - * indent - Left margin indent amount - * - * Output Args: - * stream - Output FILE stream - * - * Return: None - * - * Dumps the current state of the VCPU specified by vcpuid, within the VM - * given by vm, to the FILE stream given by stream. - */ -void vcpu_dump(FILE *stream, struct kvm_vm *vm, - uint32_t vcpuid, uint8_t indent) -{ - struct kvm_regs regs; - struct kvm_sregs sregs; - - fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid); - - fprintf(stream, "%*sregs:\n", indent + 2, ""); - vcpu_regs_get(vm, vcpuid, ®s); - regs_dump(stream, ®s, indent + 4); - - fprintf(stream, "%*ssregs:\n", indent + 2, ""); - vcpu_sregs_get(vm, vcpuid, &sregs); - sregs_dump(stream, &sregs, indent + 4); -} - /* Known KVM exit reasons */ static struct exit_reason { unsigned int reason; @@ -1577,7 +1350,8 @@ static struct exit_reason { #endif }; -/* Exit Reason String +/* + * Exit Reason String * * Input Args: * exit_reason - Exit reason @@ -1603,7 +1377,8 @@ const char *exit_reason_str(unsigned int exit_reason) return "Unknown"; } -/* Physical Page Allocate +/* + * Physical Page Allocate * * Input Args: * vm - Virtual Machine @@ -1620,8 +1395,8 @@ const char *exit_reason_str(unsigned int exit_reason) * and its address is returned. A TEST_ASSERT failure occurs if no * page is available at or above paddr_min. */ -vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, - vm_paddr_t paddr_min, uint32_t memslot) +vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, + uint32_t memslot) { struct userspace_mem_region *region; sparsebit_idx_t pg; @@ -1631,17 +1406,15 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, " paddr_min: 0x%lx page_size: 0x%x", paddr_min, vm->page_size); - /* Locate memory region. */ region = memslot2region(vm, memslot); - - /* Locate next available physical page at or above paddr_min. */ pg = paddr_min >> vm->page_shift; + /* Locate next available physical page at or above paddr_min. 
*/ if (!sparsebit_is_set(region->unused_phy_pages, pg)) { pg = sparsebit_next_set(region->unused_phy_pages, pg); if (pg == 0) { fprintf(stderr, "No guest physical page available, " - "paddr_min: 0x%lx page_size: 0x%x memslot: %u", + "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n", paddr_min, vm->page_size, memslot); fputs("---- vm dump ----\n", stderr); vm_dump(stderr, vm, 2); @@ -1655,7 +1428,8 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, return pg * vm->page_size; } -/* Address Guest Virtual to Host Virtual +/* + * Address Guest Virtual to Host Virtual * * Input Args: * vm - Virtual Machine diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h index 36367be0fe26..3702c560bbf2 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h +++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h @@ -11,18 +11,19 @@ #include "sparsebit.h" +#define KVM_DEV_PATH "/dev/kvm" + #ifndef BITS_PER_BYTE -#define BITS_PER_BYTE 8 +#define BITS_PER_BYTE 8 #endif #ifndef BITS_PER_LONG -#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long)) +#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long)) #endif #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) -#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG) +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG) -/* Concrete definition of struct kvm_vm. */ struct userspace_mem_region { struct userspace_mem_region *next, *prev; struct kvm_userspace_memory_region region; @@ -53,7 +54,6 @@ struct kvm_vm { struct userspace_mem_region *userspace_mem_region_head; struct sparsebit *vpages_valid; struct sparsebit *vpages_mapped; - bool has_irqchip; bool pgd_created; vm_paddr_t pgd; @@ -61,13 +61,11 @@ struct kvm_vm { vm_vaddr_t tss; }; -struct vcpu *vcpu_find(struct kvm_vm *vm, - uint32_t vcpuid); -void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot); +struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid); +void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, + int gdt_memslot); void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); -void regs_dump(FILE *stream, struct kvm_regs *regs, - uint8_t indent); -void sregs_dump(FILE *stream, struct kvm_sregs *sregs, - uint8_t indent); +void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent); +void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent); #endif /* SELFTEST_KVM_UTIL_INTERNAL_H */ diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 79c2c5c203c0..67f84fbb0ca4 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -672,6 +672,102 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code) vcpu_set_mp_state(vm, vcpuid, &mp_state); } +/* Allocate an instance of struct kvm_cpuid2 + * + * Input Args: None + * + * Output Args: None + * + * Return: A pointer to the allocated struct. The caller is responsible + * for freeing this struct. + * + * Since kvm_cpuid2 uses a 0-length array to allow a the size of the + * array to be decided at allocation time, allocation is slightly + * complicated. This function uses a reasonable default length for + * the array and performs the appropriate allocation. 
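+ *
+ * A minimal usage sketch (illustrative only; the entries are filled in
+ * by the caller, e.g. via KVM_GET_SUPPORTED_CPUID, and the caller must
+ * free() the result as noted above):
+ *
+ *    struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2();
+ *    ... fill or query cpuid->nent entries ...
+ *    free(cpuid);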
+ */ +static struct kvm_cpuid2 *allocate_kvm_cpuid2(void) +{ + struct kvm_cpuid2 *cpuid; + int nent = 100; + size_t size; + + size = sizeof(*cpuid); + size += nent * sizeof(struct kvm_cpuid_entry2); + cpuid = malloc(size); + if (!cpuid) { + perror("malloc"); + abort(); + } + + cpuid->nent = nent; + + return cpuid; +} + +/* KVM Supported CPUID Get + * + * Input Args: None + * + * Output Args: + * + * Return: The supported KVM CPUID + * + * Get the guest CPUID supported by KVM. + */ +struct kvm_cpuid2 *kvm_get_supported_cpuid(void) +{ + static struct kvm_cpuid2 *cpuid; + int ret; + int kvm_fd; + + if (cpuid) + return cpuid; + + cpuid = allocate_kvm_cpuid2(); + kvm_fd = open(KVM_DEV_PATH, O_RDONLY); + if (kvm_fd < 0) + exit(KSFT_SKIP); + + ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid); + TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n", + ret, errno); + + close(kvm_fd); + return cpuid; +} + +/* Locate a cpuid entry. + * + * Input Args: + * cpuid: The cpuid. + * function: The function of the cpuid entry to find. + * + * Output Args: None + * + * Return: A pointer to the cpuid entry. Never returns NULL. + */ +struct kvm_cpuid_entry2 * +kvm_get_supported_cpuid_index(uint32_t function, uint32_t index) +{ + struct kvm_cpuid2 *cpuid; + struct kvm_cpuid_entry2 *entry = NULL; + int i; + + cpuid = kvm_get_supported_cpuid(); + for (i = 0; i < cpuid->nent; i++) { + if (cpuid->entries[i].function == function && + cpuid->entries[i].index == index) { + entry = &cpuid->entries[i]; + break; + } + } + + TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).", + function, index); + return entry; +} + /* VM VCPU CPUID Set * * Input Args: @@ -698,6 +794,7 @@ void vcpu_set_cpuid(struct kvm_vm *vm, rc, errno); } + /* Create a VM with reasonable defaults * * Input Args: @@ -742,6 +839,154 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages, return vm; } +/* VCPU Get MSR + * + * Input Args: + * vm - Virtual Machine + * vcpuid - VCPU ID + * msr_index - Index of MSR + * + * Output Args: None + * + * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced. + * + * Get value of MSR for VCPU. + */ +uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index) +{ + struct vcpu *vcpu = vcpu_find(vm, vcpuid); + struct { + struct kvm_msrs header; + struct kvm_msr_entry entry; + } buffer = {}; + int r; + + TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); + buffer.header.nmsrs = 1; + buffer.entry.index = msr_index; + r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header); + TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n" + " rc: %i errno: %i", r, errno); + + return buffer.entry.data; +} + +/* VCPU Set MSR + * + * Input Args: + * vm - Virtual Machine + * vcpuid - VCPU ID + * msr_index - Index of MSR + * msr_value - New value of MSR + * + * Output Args: None + * + * Return: On success, nothing. On failure a TEST_ASSERT is produced. + * + * Set value of MSR for VCPU. 
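+ *
+ * A minimal usage sketch (VCPU_ID and msr_index are placeholders, not
+ * defined here); it reads an MSR back and writes the same value:
+ *
+ *    uint64_t val = vcpu_get_msr(vm, VCPU_ID, msr_index);
+ *    vcpu_set_msr(vm, VCPU_ID, msr_index, val);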
+ */ +void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, + uint64_t msr_value) +{ + struct vcpu *vcpu = vcpu_find(vm, vcpuid); + struct { + struct kvm_msrs header; + struct kvm_msr_entry entry; + } buffer = {}; + int r; + + TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); + memset(&buffer, 0, sizeof(buffer)); + buffer.header.nmsrs = 1; + buffer.entry.index = msr_index; + buffer.entry.data = msr_value; + r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header); + TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n" + " rc: %i errno: %i", r, errno); +} + +/* VM VCPU Args Set + * + * Input Args: + * vm - Virtual Machine + * vcpuid - VCPU ID + * num - number of arguments + * ... - arguments, each of type uint64_t + * + * Output Args: None + * + * Return: None + * + * Sets the first num function input arguments to the values + * given as variable args. Each of the variable args is expected to + * be of type uint64_t. + */ +void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...) +{ + va_list ap; + struct kvm_regs regs; + + TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n" + " num: %u\n", + num); + + va_start(ap, num); + vcpu_regs_get(vm, vcpuid, ®s); + + if (num >= 1) + regs.rdi = va_arg(ap, uint64_t); + + if (num >= 2) + regs.rsi = va_arg(ap, uint64_t); + + if (num >= 3) + regs.rdx = va_arg(ap, uint64_t); + + if (num >= 4) + regs.rcx = va_arg(ap, uint64_t); + + if (num >= 5) + regs.r8 = va_arg(ap, uint64_t); + + if (num >= 6) + regs.r9 = va_arg(ap, uint64_t); + + vcpu_regs_set(vm, vcpuid, ®s); + va_end(ap); +} + +/* + * VM VCPU Dump + * + * Input Args: + * vm - Virtual Machine + * vcpuid - VCPU ID + * indent - Left margin indent amount + * + * Output Args: + * stream - Output FILE stream + * + * Return: None + * + * Dumps the current state of the VCPU specified by vcpuid, within the VM + * given by vm, to the FILE stream given by stream. 
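+ *
+ * A minimal usage sketch (stderr, VCPU_ID and a zero indent are
+ * arbitrary placeholder choices):
+ *
+ *    vcpu_dump(stderr, vm, VCPU_ID, 0);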
+ */ +void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) +{ + struct kvm_regs regs; + struct kvm_sregs sregs; + + fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid); + + fprintf(stream, "%*sregs:\n", indent + 2, ""); + vcpu_regs_get(vm, vcpuid, ®s); + regs_dump(stream, ®s, indent + 4); + + fprintf(stream, "%*ssregs:\n", indent + 2, ""); + vcpu_sregs_get(vm, vcpuid, &sregs); + sregs_dump(stream, &sregs, indent + 4); +} + struct kvm_x86_state { struct kvm_vcpu_events events; struct kvm_mp_state mp_state; diff --git a/tools/testing/selftests/kvm/x86_64/dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/dirty_log_test.c index 7cf3e4ae6046..395a6e07d37c 100644 --- a/tools/testing/selftests/kvm/x86_64/dirty_log_test.c +++ b/tools/testing/selftests/kvm/x86_64/dirty_log_test.c @@ -15,6 +15,7 @@ #include "test_util.h" #include "kvm_util.h" +#include "processor.h" #define DEBUG printf From d5106539cfef362e3870b6aa8b5ca587cd67e93d Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 18 Sep 2018 19:54:29 +0200 Subject: [PATCH 106/204] kvm: selftests: add vm_phy_pages_alloc Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- .../testing/selftests/kvm/include/kvm_util.h | 2 + tools/testing/selftests/kvm/lib/kvm_util.c | 58 ++++++++++++------- 2 files changed, 38 insertions(+), 22 deletions(-) diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 4d5e405ff246..e0c9cf4d1c49 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -110,6 +110,8 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, uint32_t pgd_memslot); vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, uint32_t memslot); +vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, + vm_paddr_t paddr_min, uint32_t memslot); struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size, void *guest_code); diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 1a8f8f9e3635..26c3c4350a88 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -1378,10 +1378,11 @@ const char *exit_reason_str(unsigned int exit_reason) } /* - * Physical Page Allocate + * Physical Contiguous Page Allocator * * Input Args: * vm - Virtual Machine + * num - number of pages * paddr_min - Physical address minimum * memslot - Memory region to allocate page from * @@ -1390,16 +1391,18 @@ const char *exit_reason_str(unsigned int exit_reason) * Return: * Starting physical address * - * Within the VM specified by vm, locates an available physical page - * at or above paddr_min. If found, the page is marked as in use - * and its address is returned. A TEST_ASSERT failure occurs if no - * page is available at or above paddr_min. + * Within the VM specified by vm, locates a range of available physical + * pages at or above paddr_min. If found, the pages are marked as in use + * and thier base address is returned. A TEST_ASSERT failure occurs if + * not enough pages are available at or above paddr_min. 
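+ *
+ * A minimal usage sketch (the page count, minimum address and memslot
+ * below are arbitrary placeholders):
+ *
+ *    vm_paddr_t base = vm_phy_pages_alloc(vm, 4, 0x2000, 0);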
*/ -vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, - uint32_t memslot) +vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, + vm_paddr_t paddr_min, uint32_t memslot) { struct userspace_mem_region *region; - sparsebit_idx_t pg; + sparsebit_idx_t pg, base; + + TEST_ASSERT(num > 0, "Must allocate at least one page"); TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " "not divisible by page size.\n" @@ -1407,25 +1410,36 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, paddr_min, vm->page_size); region = memslot2region(vm, memslot); - pg = paddr_min >> vm->page_shift; + base = pg = paddr_min >> vm->page_shift; - /* Locate next available physical page at or above paddr_min. */ - if (!sparsebit_is_set(region->unused_phy_pages, pg)) { - pg = sparsebit_next_set(region->unused_phy_pages, pg); - if (pg == 0) { - fprintf(stderr, "No guest physical page available, " - "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n", - paddr_min, vm->page_size, memslot); - fputs("---- vm dump ----\n", stderr); - vm_dump(stderr, vm, 2); - abort(); + do { + for (; pg < base + num; ++pg) { + if (!sparsebit_is_set(region->unused_phy_pages, pg)) { + base = pg = sparsebit_next_set(region->unused_phy_pages, pg); + break; + } } + } while (pg && pg != base + num); + + if (pg == 0) { + fprintf(stderr, "No guest physical page available, " + "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n", + paddr_min, vm->page_size, memslot); + fputs("---- vm dump ----\n", stderr); + vm_dump(stderr, vm, 2); + abort(); } - /* Specify page as in use and return its address. */ - sparsebit_clear(region->unused_phy_pages, pg); + for (pg = base; pg < base + num; ++pg) + sparsebit_clear(region->unused_phy_pages, pg); - return pg * vm->page_size; + return base * vm->page_size; +} + +vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, + uint32_t memslot) +{ + return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); } /* From 7a6629ef746d1a50cc9247bd7c92ce6b38b8ed68 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 18 Sep 2018 19:54:30 +0200 Subject: [PATCH 107/204] kvm: selftests: add virt mem support for aarch64 Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- .../selftests/kvm/lib/aarch64/processor.c | 216 ++++++++++++++++++ tools/testing/selftests/kvm/lib/kvm_util.c | 1 + .../selftests/kvm/lib/kvm_util_internal.h | 1 + 3 files changed, 218 insertions(+) create mode 100644 tools/testing/selftests/kvm/lib/aarch64/processor.c diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c new file mode 100644 index 000000000000..464d4d074249 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AArch64 code + * + * Copyright (C) 2018, Red Hat, Inc. 
+ */ + +#include "kvm_util.h" +#include "../kvm_util_internal.h" + +#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000 + +static uint64_t page_align(struct kvm_vm *vm, uint64_t v) +{ + return (v + vm->page_size) & ~(vm->page_size - 1); +} + +static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva) +{ + unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; + uint64_t mask = (1UL << (vm->va_bits - shift)) - 1; + + return (gva >> shift) & mask; +} + +static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) +{ + unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift; + uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; + + TEST_ASSERT(vm->pgtable_levels == 4, + "Mode %d does not have 4 page table levels", vm->mode); + + return (gva >> shift) & mask; +} + +static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva) +{ + unsigned int shift = (vm->page_shift - 3) + vm->page_shift; + uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; + + TEST_ASSERT(vm->pgtable_levels >= 3, + "Mode %d does not have >= 3 page table levels", vm->mode); + + return (gva >> shift) & mask; +} + +static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva) +{ + uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; + return (gva >> vm->page_shift) & mask; +} + +static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry) +{ + uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift; + return entry & mask; +} + +static uint64_t ptrs_per_pgd(struct kvm_vm *vm) +{ + unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; + return 1 << (vm->va_bits - shift); +} + +static uint64_t ptrs_per_pte(struct kvm_vm *vm) +{ + return 1 << (vm->page_shift - 3); +} + +void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot) +{ + int rc; + + if (!vm->pgd_created) { + vm_paddr_t paddr = vm_phy_pages_alloc(vm, + page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size, + KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot); + vm->pgd = paddr; + vm->pgd_created = true; + } +} + +void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, + uint32_t pgd_memslot, uint64_t flags) +{ + uint8_t attr_idx = flags & 7; + uint64_t *ptep; + + TEST_ASSERT((vaddr % vm->page_size) == 0, + "Virtual address not on page boundary,\n" + " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, + (vaddr >> vm->page_shift)), + "Invalid virtual address, vaddr: 0x%lx", vaddr); + TEST_ASSERT((paddr % vm->page_size) == 0, + "Physical address not on page boundary,\n" + " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size); + TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, + "Physical address beyond beyond maximum supported,\n" + " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + paddr, vm->max_gfn, vm->page_size); + + ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8; + if (!*ptep) { + *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot); + *ptep |= 3; + } + + switch (vm->pgtable_levels) { + case 4: + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8; + if (!*ptep) { + *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot); + *ptep |= 3; + } + /* fall through */ + case 3: + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8; + if (!*ptep) { + *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot); + *ptep |= 3; + } + /* fall through */ + case 2: + ptep = addr_gpa2hva(vm, pte_addr(vm, 
*ptep)) + pte_index(vm, vaddr) * 8; + break; + default: + TEST_ASSERT(false, "Page table levels must be 2, 3, or 4"); + } + + *ptep = paddr | 3; + *ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */; +} + +void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, + uint32_t pgd_memslot) +{ + uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */ + + _virt_pg_map(vm, vaddr, paddr, pgd_memslot, attr_idx); +} + +vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +{ + uint64_t *ptep; + + if (!vm->pgd_created) + goto unmapped_gva; + + ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8; + if (!ptep) + goto unmapped_gva; + + switch (vm->pgtable_levels) { + case 4: + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8; + if (!ptep) + goto unmapped_gva; + /* fall through */ + case 3: + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8; + if (!ptep) + goto unmapped_gva; + /* fall through */ + case 2: + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8; + if (!ptep) + goto unmapped_gva; + break; + default: + TEST_ASSERT(false, "Page table levels must be 2, 3, or 4"); + } + + return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); + +unmapped_gva: + TEST_ASSERT(false, "No mapping for vm virtual address, " + "gva: 0x%lx", gva); +} + +static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level) +{ +#ifdef DEBUG_VM + static const char * const type[] = { "", "pud", "pmd", "pte" }; + uint64_t pte, *ptep; + + if (level == 4) + return; + + for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) { + ptep = addr_gpa2hva(vm, pte); + if (!*ptep) + continue; + printf("%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep); + pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1); + } +#endif +} + +void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +{ + int level = 4 - (vm->pgtable_levels - 1); + uint64_t pgd, *ptep; + + if (!vm->pgd_created) + return; + + for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) { + ptep = addr_gpa2hva(vm, pgd); + if (!*ptep) + continue; + printf("%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep); + pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level); + } +} diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 26c3c4350a88..fd346fb82e46 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -129,6 +129,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) /* Setup mode specific traits. */ switch (vm->mode) { case VM_MODE_FLAT48PG: + vm->pgtable_levels = 4; vm->page_size = 0x1000; vm->page_shift = 12; vm->va_bits = 48; diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h index 3702c560bbf2..d048d7363d66 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h +++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h @@ -46,6 +46,7 @@ struct kvm_vm { int mode; int kvm_fd; int fd; + unsigned int pgtable_levels; unsigned int page_size; unsigned int page_shift; unsigned int va_bits; From 0bec140fb6c106a70aac183e5fa99de0ef1d161a Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 18 Sep 2018 19:54:31 +0200 Subject: [PATCH 108/204] kvm: selftests: add vcpu support for aarch64 This code adds VM and VCPU setup code for the VM_MODE_FLAT48PG mode. 
The VM_MODE_FLAT48PG isn't yet fully supportable, as it defines the guest physical address limit as 52-bits, and KVM currently only supports guests with up to 40-bit physical addresses (see KVM_PHYS_SHIFT). VM_MODE_FLAT48PG will work fine, though, as long as no >= 40-bit physical addresses are used. Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- .../selftests/kvm/include/aarch64/processor.h | 55 ++++++++++++ .../selftests/kvm/lib/aarch64/processor.c | 83 +++++++++++++++++++ 2 files changed, 138 insertions(+) create mode 100644 tools/testing/selftests/kvm/include/aarch64/processor.h diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h new file mode 100644 index 000000000000..9ef2ab1a0c08 --- /dev/null +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * AArch64 processor specific defines + * + * Copyright (C) 2018, Red Hat, Inc. + */ +#ifndef SELFTEST_KVM_PROCESSOR_H +#define SELFTEST_KVM_PROCESSOR_H + +#include "kvm_util.h" + + +#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) + +#define CPACR_EL1 3, 0, 1, 0, 2 +#define TCR_EL1 3, 0, 2, 0, 2 +#define MAIR_EL1 3, 0, 10, 2, 0 +#define TTBR0_EL1 3, 0, 2, 0, 0 +#define SCTLR_EL1 3, 0, 1, 0, 0 + +/* + * Default MAIR + * index attribute + * DEVICE_nGnRnE 0 0000:0000 + * DEVICE_nGnRE 1 0000:0100 + * DEVICE_GRE 2 0000:1100 + * NORMAL_NC 3 0100:0100 + * NORMAL 4 1111:1111 + * NORMAL_WT 5 1011:1011 + */ +#define DEFAULT_MAIR_EL1 ((0x00ul << (0 * 8)) | \ + (0x04ul << (1 * 8)) | \ + (0x0cul << (2 * 8)) | \ + (0x44ul << (3 * 8)) | \ + (0xfful << (4 * 8)) | \ + (0xbbul << (5 * 8))) + +static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t *addr) +{ + struct kvm_one_reg reg; + reg.id = id; + reg.addr = (uint64_t)addr; + vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, ®); +} + +static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t val) +{ + struct kvm_one_reg reg; + reg.id = id; + reg.addr = (uint64_t)&val; + vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, ®); +} + +#endif /* SELFTEST_KVM_PROCESSOR_H */ diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index 464d4d074249..871fe2173679 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -5,10 +5,14 @@ * Copyright (C) 2018, Red Hat, Inc. 
*/ +#define _GNU_SOURCE /* for program_invocation_name */ + #include "kvm_util.h" #include "../kvm_util_internal.h" +#include "processor.h" #define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000 +#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000 static uint64_t page_align(struct kvm_vm *vm, uint64_t v) { @@ -214,3 +218,82 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level); } } + +struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages, + void *guest_code) +{ + uint64_t ptrs_per_4k_pte = 512; + uint64_t extra_pg_pages = (extra_mem_pages / ptrs_per_4k_pte) * 2; + struct kvm_vm *vm; + + vm = vm_create(VM_MODE_FLAT48PG, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR); + + kvm_vm_elf_load(vm, program_invocation_name, 0, 0); + vm_vcpu_add_default(vm, vcpuid, guest_code); + + return vm; +} + +void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code) +{ + size_t stack_size = vm->page_size == 4096 ? + DEFAULT_STACK_PGS * vm->page_size : + vm->page_size; + uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size, + DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, 0, 0); + + vm_vcpu_add(vm, vcpuid, 0, 0); + + set_reg(vm, vcpuid, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size); + set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); +} + +void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot) +{ + struct kvm_vcpu_init init; + uint64_t sctlr_el1, tcr_el1; + + memset(&init, 0, sizeof(init)); + init.target = KVM_ARM_TARGET_GENERIC_V8; + vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, &init); + + /* + * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15 + * registers, which the variable argument list macros do. + */ + set_reg(vm, vcpuid, ARM64_SYS_REG(CPACR_EL1), 3 << 20); + + get_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), &sctlr_el1); + get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1); + + switch (vm->mode) { + case VM_MODE_FLAT48PG: + tcr_el1 |= 0ul << 14; /* TG0 = 4KB */ + tcr_el1 |= 6ul << 32; /* IPS = 52 bits */ + break; + default: + TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode); + } + + sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */; + /* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */; + tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12); + tcr_el1 |= (64 - vm->va_bits) /* T0SZ */; + + set_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), sctlr_el1); + set_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), tcr_el1); + set_reg(vm, vcpuid, ARM64_SYS_REG(MAIR_EL1), DEFAULT_MAIR_EL1); + set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd); +} + +void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) +{ + uint64_t pstate, pc; + + get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate); + get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc); + + fprintf(stream, "%*spstate: 0x%.16llx pc: 0x%.16llx\n", + indent, "", pstate, pc); + +} From 81d1cca0c062a589953eef7e98f1e68b8222a918 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 18 Sep 2018 19:54:33 +0200 Subject: [PATCH 109/204] kvm: selftests: introduce new VM mode for 64K pages Rename VM_MODE_FLAT48PG to be more descriptive of its config and add a new config that has the same parameters, except with 64K pages. 
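As an illustrative sketch only (not part of the change itself), a test
selects the new 64K-page config through the existing vm_create()
interface, e.g.:

	struct kvm_vm *vm = vm_create(VM_MODE_P52V48_64K,
				      DEFAULT_GUEST_PHY_PAGES, O_RDWR);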
Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- .../testing/selftests/kvm/include/kvm_util.h | 7 ++- .../selftests/kvm/lib/aarch64/processor.c | 8 +++- tools/testing/selftests/kvm/lib/kvm_util.c | 46 ++++++++++++------- .../selftests/kvm/lib/kvm_util_internal.h | 1 + .../selftests/kvm/lib/x86_64/processor.c | 10 ++-- 5 files changed, 47 insertions(+), 25 deletions(-) diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index e0c9cf4d1c49..88df26c41845 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -34,9 +34,14 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */ #define DEFAULT_STACK_PGS 5 enum vm_guest_mode { - VM_MODE_FLAT48PG, + VM_MODE_P52V48_4K, + VM_MODE_P52V48_64K, + NUM_VM_MODES, }; +#define vm_guest_mode_string(m) vm_guest_mode_string[m] +extern const char * const vm_guest_mode_string[]; + enum vm_mem_backing_src_type { VM_MEM_SRC_ANONYMOUS, VM_MEM_SRC_ANONYMOUS_THP, diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index 871fe2173679..b1dfc0d4b68e 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -226,7 +226,7 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages, uint64_t extra_pg_pages = (extra_mem_pages / ptrs_per_4k_pte) * 2; struct kvm_vm *vm; - vm = vm_create(VM_MODE_FLAT48PG, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR); + vm = vm_create(VM_MODE_P52V48_4K, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR); kvm_vm_elf_load(vm, program_invocation_name, 0, 0); vm_vcpu_add_default(vm, vcpuid, guest_code); @@ -267,10 +267,14 @@ void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot) get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1); switch (vm->mode) { - case VM_MODE_FLAT48PG: + case VM_MODE_P52V48_4K: tcr_el1 |= 0ul << 14; /* TG0 = 4KB */ tcr_el1 |= 6ul << 32; /* IPS = 52 bits */ break; + case VM_MODE_P52V48_64K: + tcr_el1 |= 1ul << 14; /* TG0 = 64KB */ + tcr_el1 |= 6ul << 32; /* IPS = 52 bits */ + break; default: TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode); } diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index fd346fb82e46..b37e52f4daad 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -17,7 +17,7 @@ #include #define KVM_UTIL_PGS_PER_HUGEPG 512 -#define KVM_UTIL_MIN_PADDR 0x2000 +#define KVM_UTIL_MIN_PFN 2 /* Aligns x up to the next multiple of size. Size must be a power of 2. */ static void *align(void *x, size_t size) @@ -96,11 +96,16 @@ static void vm_open(struct kvm_vm *vm, int perm) "rc: %i errno: %i", vm->fd, errno); } +const char * const vm_guest_mode_string[] = { + "PA-bits:52, VA-bits:48, 4K pages", + "PA-bits:52, VA-bits:48, 64K pages", +}; + /* * VM Create * * Input Args: - * mode - VM Mode (e.g. VM_MODE_FLAT48PG) + * mode - VM Mode (e.g. VM_MODE_P52V48_4K) * phy_pages - Physical memory pages * perm - permission * @@ -109,7 +114,7 @@ static void vm_open(struct kvm_vm *vm, int perm) * Return: * Pointer to opaque structure that describes the created VM. * - * Creates a VM with the mode specified by mode (e.g. VM_MODE_FLAT48PG). + * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K). 
* When phy_pages is non-zero, a memory region of phy_pages physical pages * is created and mapped starting at guest physical address 0. The file * descriptor to control the created VM is created with the permissions @@ -128,28 +133,34 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) /* Setup mode specific traits. */ switch (vm->mode) { - case VM_MODE_FLAT48PG: + case VM_MODE_P52V48_4K: vm->pgtable_levels = 4; vm->page_size = 0x1000; vm->page_shift = 12; vm->va_bits = 48; - - /* Limit to 48-bit canonical virtual addresses. */ - vm->vpages_valid = sparsebit_alloc(); - sparsebit_set_num(vm->vpages_valid, - 0, (1ULL << (48 - 1)) >> vm->page_shift); - sparsebit_set_num(vm->vpages_valid, - (~((1ULL << (48 - 1)) - 1)) >> vm->page_shift, - (1ULL << (48 - 1)) >> vm->page_shift); - - /* Limit physical addresses to 52-bits. */ - vm->max_gfn = ((1ULL << 52) >> vm->page_shift) - 1; break; - + case VM_MODE_P52V48_64K: + vm->pgtable_levels = 3; + vm->pa_bits = 52; + vm->page_size = 0x10000; + vm->page_shift = 16; + vm->va_bits = 48; + break; default: TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode); } + /* Limit to VA-bit canonical virtual addresses. */ + vm->vpages_valid = sparsebit_alloc(); + sparsebit_set_num(vm->vpages_valid, + 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); + sparsebit_set_num(vm->vpages_valid, + (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, + (1ULL << (vm->va_bits - 1)) >> vm->page_shift); + + /* Limit physical addresses to PA-bits. */ + vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; + /* Allocate and setup memory for guest. */ vm->vpages_mapped = sparsebit_alloc(); if (phy_pages != 0) @@ -868,7 +879,8 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, pages--, vaddr += vm->page_size) { vm_paddr_t paddr; - paddr = vm_phy_page_alloc(vm, KVM_UTIL_MIN_PADDR, data_memslot); + paddr = vm_phy_page_alloc(vm, + KVM_UTIL_MIN_PFN * vm->page_size, data_memslot); virt_pg_map(vm, vaddr, paddr, pgd_memslot); diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h index d048d7363d66..52701db0f253 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h +++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h @@ -49,6 +49,7 @@ struct kvm_vm { unsigned int pgtable_levels; unsigned int page_size; unsigned int page_shift; + unsigned int pa_bits; unsigned int va_bits; uint64_t max_gfn; struct vcpu *vcpu_head; diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 67f84fbb0ca4..f28127f4a3af 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -231,7 +231,7 @@ void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot) { int rc; - TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use " + TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use " "unknown or unsupported guest mode, mode: 0x%x", vm->mode); /* If needed, create page map l4 table. 
*/ @@ -264,7 +264,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, uint16_t index[4]; struct pageMapL4Entry *pml4e; - TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use " + TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use " "unknown or unsupported guest mode, mode: 0x%x", vm->mode); TEST_ASSERT((vaddr % vm->page_size) == 0, @@ -551,7 +551,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) struct pageTableEntry *pte; void *hva; - TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use " + TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use " "unknown or unsupported guest mode, mode: 0x%x", vm->mode); index[0] = (gva >> 12) & 0x1ffu; @@ -624,7 +624,7 @@ void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot) kvm_setup_gdt(vm, &sregs.gdt, gdt_memslot, pgd_memslot); switch (vm->mode) { - case VM_MODE_FLAT48PG: + case VM_MODE_P52V48_4K: sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG; sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR; sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX); @@ -823,7 +823,7 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages, uint64_t extra_pg_pages = extra_mem_pages / 512 * 2; /* Create VM */ - vm = vm_create(VM_MODE_FLAT48PG, + vm = vm_create(VM_MODE_P52V48_4K, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR); From fff8dcd7b4a242e3752d6f316967b02f933031d0 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 18 Sep 2018 19:54:32 +0200 Subject: [PATCH 110/204] kvm: selftests: port dirty_log_test to aarch64 While we're messing with the code for the port and to support guest page sizes that are less than the host page size, we also make some code formatting cleanups and apply sync_global_to_guest(). Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/.gitignore | 2 +- tools/testing/selftests/kvm/Makefile | 5 +- .../kvm/{x86_64 => }/dirty_log_test.c | 163 +++++++++--------- 3 files changed, 90 insertions(+), 80 deletions(-) rename tools/testing/selftests/kvm/{x86_64 => }/dirty_log_test.c (65%) diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index e0aa5411cf9b..552b0d9a49f4 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -4,4 +4,4 @@ /x86_64/sync_regs_test /x86_64/vmx_tsc_adjust_test /x86_64/state_test -/x86_64/dirty_log_test +/dirty_log_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 959be146f789..5e23f97ec46f 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -5,6 +5,7 @@ UNAME_M := $(shell uname -m) LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c +LIBKVM_aarch64 = lib/aarch64/processor.c TEST_GEN_PROGS_x86_64 = x86_64/platform_info_test TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test @@ -12,7 +13,9 @@ TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test TEST_GEN_PROGS_x86_64 += x86_64/state_test -TEST_GEN_PROGS_x86_64 += x86_64/dirty_log_test +TEST_GEN_PROGS_x86_64 += dirty_log_test + +TEST_GEN_PROGS_aarch64 += dirty_log_test TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) LIBKVM += $(LIBKVM_$(UNAME_M)) diff --git a/tools/testing/selftests/kvm/x86_64/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c similarity index 65% rename from 
tools/testing/selftests/kvm/x86_64/dirty_log_test.c rename to tools/testing/selftests/kvm/dirty_log_test.c index 395a6e07d37c..d90f1091f687 100644 --- a/tools/testing/selftests/kvm/x86_64/dirty_log_test.c +++ b/tools/testing/selftests/kvm/dirty_log_test.c @@ -17,75 +17,75 @@ #include "kvm_util.h" #include "processor.h" -#define DEBUG printf +#define DEBUG printf + +#define VCPU_ID 1 -#define VCPU_ID 1 /* The memory slot index to track dirty pages */ -#define TEST_MEM_SLOT_INDEX 1 +#define TEST_MEM_SLOT_INDEX 1 + /* * GPA offset of the testing memory slot. Must be bigger than the * default vm mem slot, which is DEFAULT_GUEST_PHY_PAGES. */ -#define TEST_MEM_OFFSET (1ULL << 30) /* 1G */ +#define TEST_MEM_OFFSET (1ul << 30) /* 1G */ + /* Size of the testing memory slot */ -#define TEST_MEM_PAGES (1ULL << 18) /* 1G for 4K pages */ +#define TEST_MEM_PAGES (1ul << 18) /* 1G for 4K pages */ + /* How many pages to dirty for each guest loop */ -#define TEST_PAGES_PER_LOOP 1024 +#define TEST_PAGES_PER_LOOP 1024 + /* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */ -#define TEST_HOST_LOOP_N 32 +#define TEST_HOST_LOOP_N 32 + /* Interval for each host loop (ms) */ -#define TEST_HOST_LOOP_INTERVAL 10 +#define TEST_HOST_LOOP_INTERVAL 10 /* - * Guest variables. We use these variables to share data between host - * and guest. There are two copies of the variables, one in host memory - * (which is unused) and one in guest memory. When the host wants to - * access these variables, it needs to call addr_gva2hva() to access the - * guest copy. + * Guest/Host shared variables. Ensure addr_gva2hva() and/or + * sync_global_to/from_guest() are used when accessing from + * the host. READ/WRITE_ONCE() should also be used with anything + * that may change. */ -uint64_t guest_random_array[TEST_PAGES_PER_LOOP]; -uint64_t guest_iteration; -uint64_t guest_page_size; +static uint64_t host_page_size; +static uint64_t guest_page_size; +static uint64_t random_array[TEST_PAGES_PER_LOOP]; +static uint64_t iteration; /* - * Writes to the first byte of a random page within the testing memory - * region continuously. + * Continuously write to the first 8 bytes of a random pages within + * the testing memory region. */ -void guest_code(void) +static void guest_code(void) { - int i = 0; - uint64_t volatile *array = guest_random_array; - uint64_t volatile *guest_addr; + int i; while (true) { for (i = 0; i < TEST_PAGES_PER_LOOP; i++) { - /* - * Write to the first 8 bytes of a random page - * on the testing memory region. - */ - guest_addr = (uint64_t *) - (TEST_MEM_OFFSET + - (array[i] % TEST_MEM_PAGES) * guest_page_size); - *guest_addr = guest_iteration; + uint64_t addr = TEST_MEM_OFFSET; + addr += (READ_ONCE(random_array[i]) % TEST_MEM_PAGES) + * guest_page_size; + addr &= ~(host_page_size - 1); + *(uint64_t *)addr = READ_ONCE(iteration); } + /* Tell the host that we need more random numbers */ GUEST_SYNC(1); } } -/* - * Host variables. These variables should only be used by the host - * rather than the guest. 
- */ -bool host_quit; +/* Host variables */ +static bool host_quit; /* Points to the test VM memory region on which we track dirty logs */ -void *host_test_mem; +static void *host_test_mem; +static uint64_t host_num_pages; /* For statistics only */ -uint64_t host_dirty_count; -uint64_t host_clear_count; -uint64_t host_track_next_count; +static uint64_t host_dirty_count; +static uint64_t host_clear_count; +static uint64_t host_track_next_count; /* * We use this bitmap to track some pages that should have its dirty @@ -94,39 +94,34 @@ uint64_t host_track_next_count; * page bit is cleared in the latest bitmap, then the system must * report that write in the next get dirty log call. */ -unsigned long *host_bmap_track; +static unsigned long *host_bmap_track; -void generate_random_array(uint64_t *guest_array, uint64_t size) +static void generate_random_array(uint64_t *guest_array, uint64_t size) { uint64_t i; - for (i = 0; i < size; i++) { + for (i = 0; i < size; i++) guest_array[i] = random(); - } } -void *vcpu_worker(void *data) +static void *vcpu_worker(void *data) { int ret; - uint64_t loops, *guest_array, pages_count = 0; struct kvm_vm *vm = data; + uint64_t *guest_array; + uint64_t pages_count = 0; struct kvm_run *run; struct ucall uc; run = vcpu_state(vm, VCPU_ID); - /* Retrieve the guest random array pointer and cache it */ - guest_array = addr_gva2hva(vm, (vm_vaddr_t)guest_random_array); - - DEBUG("VCPU starts\n"); - + guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array); generate_random_array(guest_array, TEST_PAGES_PER_LOOP); while (!READ_ONCE(host_quit)) { - /* Let the guest to dirty these random pages */ + /* Let the guest dirty the random pages */ ret = _vcpu_run(vm, VCPU_ID); - if (run->exit_reason == KVM_EXIT_IO && - get_ucall(vm, VCPU_ID, &uc) == UCALL_SYNC) { + if (get_ucall(vm, VCPU_ID, &uc) == UCALL_SYNC) { pages_count += TEST_PAGES_PER_LOOP; generate_random_array(guest_array, TEST_PAGES_PER_LOOP); } else { @@ -137,18 +132,18 @@ void *vcpu_worker(void *data) } } - DEBUG("VCPU exits, dirtied %"PRIu64" pages\n", pages_count); + DEBUG("Dirtied %"PRIu64" pages\n", pages_count); return NULL; } -void vm_dirty_log_verify(unsigned long *bmap, uint64_t iteration) +static void vm_dirty_log_verify(unsigned long *bmap) { uint64_t page; - uint64_t volatile *value_ptr; + uint64_t *value_ptr; - for (page = 0; page < TEST_MEM_PAGES; page++) { - value_ptr = host_test_mem + page * getpagesize(); + for (page = 0; page < host_num_pages; page++) { + value_ptr = host_test_mem + page * host_page_size; /* If this is a special page that we were tracking... 
*/ if (test_and_clear_bit(page, host_bmap_track)) { @@ -208,7 +203,7 @@ void vm_dirty_log_verify(unsigned long *bmap, uint64_t iteration) } } -void help(char *name) +static void help(char *name) { puts(""); printf("usage: %s [-i iterations] [-I interval] [-h]\n", name); @@ -225,9 +220,9 @@ int main(int argc, char *argv[]) { pthread_t vcpu_thread; struct kvm_vm *vm; - uint64_t volatile *psize, *iteration; - unsigned long *bmap, iterations = TEST_HOST_LOOP_N, - interval = TEST_HOST_LOOP_INTERVAL; + unsigned long iterations = TEST_HOST_LOOP_N; + unsigned long interval = TEST_HOST_LOOP_INTERVAL; + unsigned long *bmap; int opt; while ((opt = getopt(argc, argv, "hi:I:")) != -1) { @@ -245,16 +240,21 @@ int main(int argc, char *argv[]) } } - TEST_ASSERT(iterations > 2, "Iteration must be bigger than zero\n"); - TEST_ASSERT(interval > 0, "Interval must be bigger than zero"); + TEST_ASSERT(iterations > 2, "Iterations must be greater than two"); + TEST_ASSERT(interval > 0, "Interval must be greater than zero"); DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n", iterations, interval); srandom(time(0)); - bmap = bitmap_alloc(TEST_MEM_PAGES); - host_bmap_track = bitmap_alloc(TEST_MEM_PAGES); + guest_page_size = 4096; + host_page_size = getpagesize(); + host_num_pages = (TEST_MEM_PAGES * guest_page_size) / host_page_size + + !!((TEST_MEM_PAGES * guest_page_size) % host_page_size); + + bmap = bitmap_alloc(host_num_pages); + host_bmap_track = bitmap_alloc(host_num_pages); vm = vm_create_default(VCPU_ID, TEST_MEM_PAGES, guest_code); @@ -264,32 +264,38 @@ int main(int argc, char *argv[]) TEST_MEM_SLOT_INDEX, TEST_MEM_PAGES, KVM_MEM_LOG_DIRTY_PAGES); - /* Cache the HVA pointer of the region */ - host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)TEST_MEM_OFFSET); /* Do 1:1 mapping for the dirty track memory slot */ virt_map(vm, TEST_MEM_OFFSET, TEST_MEM_OFFSET, - TEST_MEM_PAGES * getpagesize(), 0); + TEST_MEM_PAGES * guest_page_size, 0); + /* Cache the HVA pointer of the region */ + host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)TEST_MEM_OFFSET); + +#ifdef __x86_64__ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); +#endif +#ifdef __aarch64__ + ucall_init(vm, UCALL_MMIO, NULL); +#endif - /* Tell the guest about the page size on the system */ - psize = addr_gva2hva(vm, (vm_vaddr_t)&guest_page_size); - *psize = getpagesize(); + /* Tell the guest about the page sizes */ + sync_global_to_guest(vm, host_page_size); + sync_global_to_guest(vm, guest_page_size); /* Start the iterations */ - iteration = addr_gva2hva(vm, (vm_vaddr_t)&guest_iteration); - *iteration = 1; + iteration = 1; + sync_global_to_guest(vm, iteration); - /* Start dirtying pages */ pthread_create(&vcpu_thread, NULL, vcpu_worker, vm); - while (*iteration < iterations) { + while (iteration < iterations) { /* Give the vcpu thread some time to dirty some pages */ usleep(interval * 1000); kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap); - vm_dirty_log_verify(bmap, *iteration); - (*iteration)++; + vm_dirty_log_verify(bmap); + iteration++; + sync_global_to_guest(vm, iteration); } /* Tell the vcpu thread to quit */ @@ -302,6 +308,7 @@ int main(int argc, char *argv[]) free(bmap); free(host_bmap_track); + ucall_uninit(vm); kvm_vm_free(vm); return 0; From e1b376f140ad9cbb1e1976f347377b8ef08a5bc9 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 18 Sep 2018 19:54:34 +0200 Subject: [PATCH 111/204] kvm: selftests: dirty_log_test: also test 64K pages on aarch64 Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- 
tools/testing/selftests/kvm/dirty_log_test.c | 183 ++++++++++++++----- 1 file changed, 137 insertions(+), 46 deletions(-) diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c index d90f1091f687..3afc7d607a2e 100644 --- a/tools/testing/selftests/kvm/dirty_log_test.c +++ b/tools/testing/selftests/kvm/dirty_log_test.c @@ -5,6 +5,8 @@ * Copyright (C) 2018, Red Hat, Inc. */ +#define _GNU_SOURCE /* for program_invocation_name */ + #include #include #include @@ -30,9 +32,6 @@ */ #define TEST_MEM_OFFSET (1ul << 30) /* 1G */ -/* Size of the testing memory slot */ -#define TEST_MEM_PAGES (1ul << 18) /* 1G for 4K pages */ - /* How many pages to dirty for each guest loop */ #define TEST_PAGES_PER_LOOP 1024 @@ -50,6 +49,7 @@ */ static uint64_t host_page_size; static uint64_t guest_page_size; +static uint64_t guest_num_pages; static uint64_t random_array[TEST_PAGES_PER_LOOP]; static uint64_t iteration; @@ -64,7 +64,7 @@ static void guest_code(void) while (true) { for (i = 0; i < TEST_PAGES_PER_LOOP; i++) { uint64_t addr = TEST_MEM_OFFSET; - addr += (READ_ONCE(random_array[i]) % TEST_MEM_PAGES) + addr += (READ_ONCE(random_array[i]) % guest_num_pages) * guest_page_size; addr &= ~(host_page_size - 1); *(uint64_t *)addr = READ_ONCE(iteration); @@ -141,8 +141,10 @@ static void vm_dirty_log_verify(unsigned long *bmap) { uint64_t page; uint64_t *value_ptr; + uint64_t step = host_page_size >= guest_page_size ? 1 : + guest_page_size / host_page_size; - for (page = 0; page < host_num_pages; page++) { + for (page = 0; page < host_num_pages; page += step) { value_ptr = host_test_mem + page * host_page_size; /* If this is a special page that we were tracking... */ @@ -203,71 +205,64 @@ static void vm_dirty_log_verify(unsigned long *bmap) } } -static void help(char *name) +static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid, + uint64_t extra_mem_pages, void *guest_code) { - puts(""); - printf("usage: %s [-i iterations] [-I interval] [-h]\n", name); - puts(""); - printf(" -i: specify iteration counts (default: %"PRIu64")\n", - TEST_HOST_LOOP_N); - printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n", - TEST_HOST_LOOP_INTERVAL); - puts(""); - exit(0); + struct kvm_vm *vm; + uint64_t extra_pg_pages = extra_mem_pages / 512 * 2; + + vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR); + kvm_vm_elf_load(vm, program_invocation_name, 0, 0); +#ifdef __x86_64__ + vm_create_irqchip(vm); +#endif + vm_vcpu_add_default(vm, vcpuid, guest_code); + return vm; } -int main(int argc, char *argv[]) +static void run_test(enum vm_guest_mode mode, unsigned long iterations, + unsigned long interval) { + unsigned int guest_page_shift; pthread_t vcpu_thread; struct kvm_vm *vm; - unsigned long iterations = TEST_HOST_LOOP_N; - unsigned long interval = TEST_HOST_LOOP_INTERVAL; unsigned long *bmap; - int opt; - while ((opt = getopt(argc, argv, "hi:I:")) != -1) { - switch (opt) { - case 'i': - iterations = strtol(optarg, NULL, 10); - break; - case 'I': - interval = strtol(optarg, NULL, 10); - break; - case 'h': - default: - help(argv[0]); - break; - } + switch (mode) { + case VM_MODE_P52V48_4K: + guest_page_shift = 12; + break; + case VM_MODE_P52V48_64K: + guest_page_shift = 16; + break; + default: + TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode); } - TEST_ASSERT(iterations > 2, "Iterations must be greater than two"); - TEST_ASSERT(interval > 0, "Interval must be greater than zero"); + DEBUG("Testing guest mode: %s\n", 
vm_guest_mode_string(mode)); - DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n", - iterations, interval); - - srandom(time(0)); - - guest_page_size = 4096; + guest_page_size = (1ul << guest_page_shift); + /* 1G of guest page sized pages */ + guest_num_pages = (1ul << (30 - guest_page_shift)); host_page_size = getpagesize(); - host_num_pages = (TEST_MEM_PAGES * guest_page_size) / host_page_size + - !!((TEST_MEM_PAGES * guest_page_size) % host_page_size); + host_num_pages = (guest_num_pages * guest_page_size) / host_page_size + + !!((guest_num_pages * guest_page_size) % host_page_size); bmap = bitmap_alloc(host_num_pages); host_bmap_track = bitmap_alloc(host_num_pages); - vm = vm_create_default(VCPU_ID, TEST_MEM_PAGES, guest_code); + vm = create_vm(mode, VCPU_ID, guest_num_pages, guest_code); /* Add an extra memory slot for testing dirty logging */ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, TEST_MEM_OFFSET, TEST_MEM_SLOT_INDEX, - TEST_MEM_PAGES, + guest_num_pages, KVM_MEM_LOG_DIRTY_PAGES); /* Do 1:1 mapping for the dirty track memory slot */ virt_map(vm, TEST_MEM_OFFSET, TEST_MEM_OFFSET, - TEST_MEM_PAGES * guest_page_size, 0); + guest_num_pages * guest_page_size, 0); /* Cache the HVA pointer of the region */ host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)TEST_MEM_OFFSET); @@ -279,13 +274,18 @@ int main(int argc, char *argv[]) ucall_init(vm, UCALL_MMIO, NULL); #endif - /* Tell the guest about the page sizes */ + /* Export the shared variables to the guest */ sync_global_to_guest(vm, host_page_size); sync_global_to_guest(vm, guest_page_size); + sync_global_to_guest(vm, guest_num_pages); /* Start the iterations */ iteration = 1; sync_global_to_guest(vm, iteration); + host_quit = false; + host_dirty_count = 0; + host_clear_count = 0; + host_track_next_count = 0; pthread_create(&vcpu_thread, NULL, vcpu_worker, vm); @@ -310,6 +310,97 @@ int main(int argc, char *argv[]) free(host_bmap_track); ucall_uninit(vm); kvm_vm_free(vm); +} + +static struct vm_guest_modes { + enum vm_guest_mode mode; + bool supported; + bool enabled; +} vm_guest_modes[NUM_VM_MODES] = { + { VM_MODE_P52V48_4K, 1, 1, }, +#ifdef __aarch64__ + { VM_MODE_P52V48_64K, 1, 1, }, +#else + { VM_MODE_P52V48_64K, 0, 0, }, +#endif +}; + +static void help(char *name) +{ + int i; + + puts(""); + printf("usage: %s [-h] [-i iterations] [-I interval] [-m mode]\n", name); + puts(""); + printf(" -i: specify iteration counts (default: %"PRIu64")\n", + TEST_HOST_LOOP_N); + printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n", + TEST_HOST_LOOP_INTERVAL); + printf(" -m: specify the guest mode ID to test " + "(default: test all supported modes)\n" + " This option may be used multiple times.\n" + " Guest mode IDs:\n"); + for (i = 0; i < NUM_VM_MODES; ++i) { + printf(" %d: %s%s\n", + vm_guest_modes[i].mode, + vm_guest_mode_string(vm_guest_modes[i].mode), + vm_guest_modes[i].supported ? 
" (supported)" : ""); + } + puts(""); + exit(0); +} + +int main(int argc, char *argv[]) +{ + unsigned long iterations = TEST_HOST_LOOP_N; + unsigned long interval = TEST_HOST_LOOP_INTERVAL; + bool mode_selected = false; + unsigned int mode; + int opt, i; + + while ((opt = getopt(argc, argv, "hi:I:m:")) != -1) { + switch (opt) { + case 'i': + iterations = strtol(optarg, NULL, 10); + break; + case 'I': + interval = strtol(optarg, NULL, 10); + break; + case 'm': + if (!mode_selected) { + for (i = 0; i < NUM_VM_MODES; ++i) + vm_guest_modes[i].enabled = 0; + mode_selected = true; + } + mode = strtoul(optarg, NULL, 10); + TEST_ASSERT(mode < NUM_VM_MODES, + "Guest mode ID %d too big", mode); + vm_guest_modes[mode].enabled = 1; + break; + case 'h': + default: + help(argv[0]); + break; + } + } + + TEST_ASSERT(iterations > 2, "Iterations must be greater than two"); + TEST_ASSERT(interval > 0, "Interval must be greater than zero"); + + DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n", + iterations, interval); + + srandom(time(0)); + + for (i = 0; i < NUM_VM_MODES; ++i) { + if (!vm_guest_modes[i].enabled) + continue; + TEST_ASSERT(vm_guest_modes[i].supported, + "Guest mode ID %d (%s) not supported.", + vm_guest_modes[i].mode, + vm_guest_mode_string(vm_guest_modes[i].mode)); + run_test(vm_guest_modes[i].mode, iterations, interval); + } return 0; } From e28934e661c1d9425f1cc41b405dcd8626507206 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 18 Sep 2018 19:54:35 +0200 Subject: [PATCH 112/204] kvm: selftests: stop lying to aarch64 tests about PA-bits Let's add the 40 PA-bit versions of the VM modes, that AArch64 should have been using, so we can extend the dirty log test without breaking things. Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/dirty_log_test.c | 13 ++++++++++--- tools/testing/selftests/kvm/include/kvm_util.h | 2 ++ .../selftests/kvm/lib/aarch64/processor.c | 8 ++++++++ tools/testing/selftests/kvm/lib/kvm_util.c | 16 ++++++++++++++++ 4 files changed, 36 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c index 3afc7d607a2e..61396882ad4e 100644 --- a/tools/testing/selftests/kvm/dirty_log_test.c +++ b/tools/testing/selftests/kvm/dirty_log_test.c @@ -230,9 +230,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations, switch (mode) { case VM_MODE_P52V48_4K: + case VM_MODE_P40V48_4K: guest_page_shift = 12; break; case VM_MODE_P52V48_64K: + case VM_MODE_P40V48_64K: guest_page_shift = 16; break; default: @@ -317,11 +319,16 @@ static struct vm_guest_modes { bool supported; bool enabled; } vm_guest_modes[NUM_VM_MODES] = { +#if defined(__x86_64__) { VM_MODE_P52V48_4K, 1, 1, }, -#ifdef __aarch64__ - { VM_MODE_P52V48_64K, 1, 1, }, -#else { VM_MODE_P52V48_64K, 0, 0, }, + { VM_MODE_P40V48_4K, 0, 0, }, + { VM_MODE_P40V48_64K, 0, 0, }, +#elif defined(__aarch64__) + { VM_MODE_P52V48_4K, 0, 0, }, + { VM_MODE_P52V48_64K, 0, 0, }, + { VM_MODE_P40V48_4K, 1, 1, }, + { VM_MODE_P40V48_64K, 1, 1, }, #endif }; diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 88df26c41845..a4e59e3b4826 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -36,6 +36,8 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */ enum vm_guest_mode { VM_MODE_P52V48_4K, VM_MODE_P52V48_64K, + VM_MODE_P40V48_4K, + VM_MODE_P40V48_64K, 
NUM_VM_MODES, }; diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index b1dfc0d4b68e..b6022e2f116e 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -275,6 +275,14 @@ void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot) tcr_el1 |= 1ul << 14; /* TG0 = 64KB */ tcr_el1 |= 6ul << 32; /* IPS = 52 bits */ break; + case VM_MODE_P40V48_4K: + tcr_el1 |= 0ul << 14; /* TG0 = 4KB */ + tcr_el1 |= 2ul << 32; /* IPS = 40 bits */ + break; + case VM_MODE_P40V48_64K: + tcr_el1 |= 1ul << 14; /* TG0 = 64KB */ + tcr_el1 |= 2ul << 32; /* IPS = 40 bits */ + break; default: TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode); } diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index b37e52f4daad..8c06da4f03db 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -99,6 +99,8 @@ static void vm_open(struct kvm_vm *vm, int perm) const char * const vm_guest_mode_string[] = { "PA-bits:52, VA-bits:48, 4K pages", "PA-bits:52, VA-bits:48, 64K pages", + "PA-bits:40, VA-bits:48, 4K pages", + "PA-bits:40, VA-bits:48, 64K pages", }; /* @@ -146,6 +148,20 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) vm->page_shift = 16; vm->va_bits = 48; break; + case VM_MODE_P40V48_4K: + vm->pgtable_levels = 4; + vm->pa_bits = 40; + vm->va_bits = 48; + vm->page_size = 0x1000; + vm->page_shift = 12; + break; + case VM_MODE_P40V48_64K: + vm->pgtable_levels = 3; + vm->pa_bits = 40; + vm->va_bits = 48; + vm->page_size = 0x10000; + vm->page_shift = 16; + break; default: TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode); } From 5b8ee8792f6bda46978e6e86fda4650bcbae07ab Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 18 Sep 2018 19:54:36 +0200 Subject: [PATCH 113/204] kvm: selftests: support high GPAs in dirty_log_test Signed-off-by: Andrew Jones Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/dirty_log_test.c | 65 +++++++++++++++----- 1 file changed, 50 insertions(+), 15 deletions(-) diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c index 61396882ad4e..d59820cc2d39 100644 --- a/tools/testing/selftests/kvm/dirty_log_test.c +++ b/tools/testing/selftests/kvm/dirty_log_test.c @@ -26,11 +26,8 @@ /* The memory slot index to track dirty pages */ #define TEST_MEM_SLOT_INDEX 1 -/* - * GPA offset of the testing memory slot. Must be bigger than the - * default vm mem slot, which is DEFAULT_GUEST_PHY_PAGES. - */ -#define TEST_MEM_OFFSET (1ul << 30) /* 1G */ +/* Default guest test memory offset, 1G */ +#define DEFAULT_GUEST_TEST_MEM 0x40000000 /* How many pages to dirty for each guest loop */ #define TEST_PAGES_PER_LOOP 1024 @@ -53,6 +50,12 @@ static uint64_t guest_num_pages; static uint64_t random_array[TEST_PAGES_PER_LOOP]; static uint64_t iteration; +/* + * GPA offset of the testing memory slot. Must be bigger than + * DEFAULT_GUEST_PHY_PAGES. + */ +static uint64_t guest_test_mem = DEFAULT_GUEST_TEST_MEM; + /* * Continuously write to the first 8 bytes of a random pages within * the testing memory region. 
@@ -63,7 +66,7 @@ static void guest_code(void) while (true) { for (i = 0; i < TEST_PAGES_PER_LOOP; i++) { - uint64_t addr = TEST_MEM_OFFSET; + uint64_t addr = guest_test_mem; addr += (READ_ONCE(random_array[i]) % guest_num_pages) * guest_page_size; addr &= ~(host_page_size - 1); @@ -221,20 +224,29 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid, } static void run_test(enum vm_guest_mode mode, unsigned long iterations, - unsigned long interval) + unsigned long interval, bool top_offset) { - unsigned int guest_page_shift; + unsigned int guest_pa_bits, guest_page_shift; pthread_t vcpu_thread; struct kvm_vm *vm; + uint64_t max_gfn; unsigned long *bmap; switch (mode) { case VM_MODE_P52V48_4K: - case VM_MODE_P40V48_4K: + guest_pa_bits = 52; guest_page_shift = 12; break; case VM_MODE_P52V48_64K: + guest_pa_bits = 52; + guest_page_shift = 16; + break; + case VM_MODE_P40V48_4K: + guest_pa_bits = 40; + guest_page_shift = 12; + break; case VM_MODE_P40V48_64K: + guest_pa_bits = 40; guest_page_shift = 16; break; default: @@ -243,6 +255,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations, DEBUG("Testing guest mode: %s\n", vm_guest_mode_string(mode)); + max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1; guest_page_size = (1ul << guest_page_shift); /* 1G of guest page sized pages */ guest_num_pages = (1ul << (30 - guest_page_shift)); @@ -250,6 +263,13 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations, host_num_pages = (guest_num_pages * guest_page_size) / host_page_size + !!((guest_num_pages * guest_page_size) % host_page_size); + if (top_offset) { + guest_test_mem = (max_gfn - guest_num_pages) * guest_page_size; + guest_test_mem &= ~(host_page_size - 1); + } + + DEBUG("guest test mem offset: 0x%lx\n", guest_test_mem); + bmap = bitmap_alloc(host_num_pages); host_bmap_track = bitmap_alloc(host_num_pages); @@ -257,17 +277,17 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations, /* Add an extra memory slot for testing dirty logging */ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, - TEST_MEM_OFFSET, + guest_test_mem, TEST_MEM_SLOT_INDEX, guest_num_pages, KVM_MEM_LOG_DIRTY_PAGES); /* Do 1:1 mapping for the dirty track memory slot */ - virt_map(vm, TEST_MEM_OFFSET, TEST_MEM_OFFSET, + virt_map(vm, guest_test_mem, guest_test_mem, guest_num_pages * guest_page_size, 0); /* Cache the HVA pointer of the region */ - host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)TEST_MEM_OFFSET); + host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_mem); #ifdef __x86_64__ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); @@ -279,6 +299,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations, /* Export the shared variables to the guest */ sync_global_to_guest(vm, host_page_size); sync_global_to_guest(vm, guest_page_size); + sync_global_to_guest(vm, guest_test_mem); sync_global_to_guest(vm, guest_num_pages); /* Start the iterations */ @@ -337,12 +358,17 @@ static void help(char *name) int i; puts(""); - printf("usage: %s [-h] [-i iterations] [-I interval] [-m mode]\n", name); + printf("usage: %s [-h] [-i iterations] [-I interval] " + "[-o offset] [-t] [-m mode]\n", name); puts(""); printf(" -i: specify iteration counts (default: %"PRIu64")\n", TEST_HOST_LOOP_N); printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n", TEST_HOST_LOOP_INTERVAL); + printf(" -o: guest test memory offset (default: 0x%lx)\n", + DEFAULT_GUEST_TEST_MEM); + printf(" -t: map guest test memory at the top of the 
allowed " + "physical address range\n"); printf(" -m: specify the guest mode ID to test " "(default: test all supported modes)\n" " This option may be used multiple times.\n" @@ -362,10 +388,11 @@ int main(int argc, char *argv[]) unsigned long iterations = TEST_HOST_LOOP_N; unsigned long interval = TEST_HOST_LOOP_INTERVAL; bool mode_selected = false; + bool top_offset = false; unsigned int mode; int opt, i; - while ((opt = getopt(argc, argv, "hi:I:m:")) != -1) { + while ((opt = getopt(argc, argv, "hi:I:o:tm:")) != -1) { switch (opt) { case 'i': iterations = strtol(optarg, NULL, 10); @@ -373,6 +400,12 @@ int main(int argc, char *argv[]) case 'I': interval = strtol(optarg, NULL, 10); break; + case 'o': + guest_test_mem = strtoull(optarg, NULL, 0); + break; + case 't': + top_offset = true; + break; case 'm': if (!mode_selected) { for (i = 0; i < NUM_VM_MODES; ++i) @@ -393,6 +426,8 @@ int main(int argc, char *argv[]) TEST_ASSERT(iterations > 2, "Iterations must be greater than two"); TEST_ASSERT(interval > 0, "Interval must be greater than zero"); + TEST_ASSERT(!top_offset || guest_test_mem == DEFAULT_GUEST_TEST_MEM, + "Cannot use both -o [offset] and -t at the same time"); DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n", iterations, interval); @@ -406,7 +441,7 @@ int main(int argc, char *argv[]) "Guest mode ID %d (%s) not supported.", vm_guest_modes[i].mode, vm_guest_mode_string(vm_guest_modes[i].mode)); - run_test(vm_guest_modes[i].mode, iterations, interval); + run_test(vm_guest_modes[i].mode, iterations, interval, top_offset); } return 0; From cfb634fe3052aefc4e1360fa322018c9a0b49755 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Fri, 21 Sep 2018 10:36:17 -0700 Subject: [PATCH 114/204] KVM: nVMX: Clear reserved bits of #DB exit qualification According to volume 3 of the SDM, bits 63:15 and 12:4 of the exit qualification field for debug exceptions are reserved (cleared to 0). However, the SDM is incorrect about bit 16 (corresponding to DR6.RTM). This bit should be set if a debug exception (#DB) or a breakpoint exception (#BP) occurred inside an RTM region while advanced debugging of RTM transactional regions was enabled. Note that this is the opposite of DR6.RTM, which "indicates (when clear) that a debug exception (#DB) or breakpoint exception (#BP) occurred inside an RTM region while advanced debugging of RTM transactional regions was enabled." There is still an issue with stale DR6 bits potentially being misreported for the current debug exception. DR6 should not have been modified before vectoring the #DB exception, and the "new DR6 bits" should be available somewhere, but it was and they aren't. 
Fixes: b96fb439774e1 ("KVM: nVMX: fixes to nested virt interrupt injection") Signed-off-by: Jim Mattson Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/vmx.c | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 09b2e3e2cf1b..1c09a0d1771f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -177,6 +177,7 @@ enum { #define DR6_BD (1 << 13) #define DR6_BS (1 << 14) +#define DR6_BT (1 << 15) #define DR6_RTM (1 << 16) #define DR6_FIXED_1 0xfffe0ff0 #define DR6_INIT 0xffff0ff0 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b3c5517a89b3..14d446366ca5 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3290,10 +3290,13 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit } } else { if (vmcs12->exception_bitmap & (1u << nr)) { - if (nr == DB_VECTOR) + if (nr == DB_VECTOR) { *exit_qual = vcpu->arch.dr6; - else + *exit_qual &= ~(DR6_FIXED_1 | DR6_BT); + *exit_qual ^= DR6_RTM; + } else { *exit_qual = 0; + } return 1; } } From bd18bffca35397214ae68d85cf7203aca25c3c1d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 22 Aug 2018 14:57:07 -0700 Subject: [PATCH 115/204] KVM: nVMX: restore host state in nested_vmx_vmexit for VMFail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A VMEnter that VMFails (as opposed to VMExits) does not touch host state beyond registers that are explicitly noted in the VMFail path, e.g. EFLAGS. Host state does not need to be loaded because VMFail is only signaled for consistency checks that occur before the CPU starts to load guest state, i.e. there is no need to restore any state as nothing has been modified. But in the case where a VMFail is detected by hardware and not by KVM (due to deferring consistency checks to hardware), KVM has already loaded some amount of guest state. Luckily, "loaded" only means loaded to KVM's software model, i.e. vmcs01 has not been modified. So, unwind our software model to the pre-VMEntry host state. Not restoring host state in this VMFail path leads to a variety of failures because we end up with stale data in vcpu->arch, e.g. CR0, CR4, EFER, etc... will all be out of sync relative to vmcs01. Any significant delta in the stale data is all but guaranteed to crash L1, e.g. emulation of SMEP, SMAP, UMIP, WP, etc... will be wrong. An alternative to this "soft" reload would be to load host state from vmcs12 as if we triggered a VMExit (as opposed to VMFail), but that is wildly inconsistent with respect to the VMX architecture, e.g. an L1 VMM with separate VMExit and VMFail paths would explode. Note that this approach does not mean KVM is 100% accurate with respect to VMX hardware behavior, even at an architectural level (the exact order of consistency checks is microarchitecture specific). But 100% emulation accuracy isn't the goal (with this patch), rather the goal is to be consistent in the information delivered to L1, e.g. a VMExit should not fall-through VMENTER, and a VMFail should not jump to HOST_RIP. This technically reverts commit "5af4157388ad (KVM: nVMX: Fix mmu context after VMLAUNCH/VMRESUME failure)", but retains the core aspects of that patch, just in an open coded form due to the need to pull state from vmcs01 instead of vmcs12. 
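Concretely, the unwind reads the values to restore from vmcs01 and
KVM's own software model rather than from vmcs12's host fields, e.g.
(excerpted from the new nested_vmx_restore_host_state() added below):

        vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
        vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
        vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
        nested_ept_uninit_mmu_context(vcpu);
        kvm_mmu_reset_context(vcpu);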
Restoring host state resolves a variety of issues introduced by commit "4f350c6dbcb9 (kvm: nVMX: Handle deferred early VMLAUNCH/VMRESUME failure properly)", which remedied the incorrect behavior of treating VMFail like VMExit but in doing so neglected to restore arch state that had been modified prior to attempting nested VMEnter. A sample failure that occurs due to stale vcpu.arch state is a fault of some form while emulating an LGDT (due to emulated UMIP) from L1 after a failed VMEntry to L3, in this case when running the KVM unit test test_tpr_threshold_values in L1. L0 also hits a WARN in this case due to a stale arch.cr4.UMIP. L1: BUG: unable to handle kernel paging request at ffffc90000663b9e PGD 276512067 P4D 276512067 PUD 276513067 PMD 274efa067 PTE 8000000271de2163 Oops: 0009 [#1] SMP CPU: 5 PID: 12495 Comm: qemu-system-x86 Tainted: G W 4.18.0-rc2+ #2 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 RIP: 0010:native_load_gdt+0x0/0x10 ... Call Trace: load_fixmap_gdt+0x22/0x30 __vmx_load_host_state+0x10e/0x1c0 [kvm_intel] vmx_switch_vmcs+0x2d/0x50 [kvm_intel] nested_vmx_vmexit+0x222/0x9c0 [kvm_intel] vmx_handle_exit+0x246/0x15a0 [kvm_intel] kvm_arch_vcpu_ioctl_run+0x850/0x1830 [kvm] kvm_vcpu_ioctl+0x3a1/0x5c0 [kvm] do_vfs_ioctl+0x9f/0x600 ksys_ioctl+0x66/0x70 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x4f/0x100 entry_SYSCALL_64_after_hwframe+0x44/0xa9 L0: WARNING: CPU: 2 PID: 3529 at arch/x86/kvm/vmx.c:6618 handle_desc+0x28/0x30 [kvm_intel] ... CPU: 2 PID: 3529 Comm: qemu-system-x86 Not tainted 4.17.2-coffee+ #76 Hardware name: Intel Corporation Kabylake Client platform/KBL S RIP: 0010:handle_desc+0x28/0x30 [kvm_intel] ... Call Trace: kvm_arch_vcpu_ioctl_run+0x863/0x1840 [kvm] kvm_vcpu_ioctl+0x3a1/0x5c0 [kvm] do_vfs_ioctl+0x9f/0x5e0 ksys_ioctl+0x66/0x70 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x49/0xf0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Fixes: 5af4157388ad (KVM: nVMX: Fix mmu context after VMLAUNCH/VMRESUME failure) Fixes: 4f350c6dbcb9 (kvm: nVMX: Handle deferred early VMLAUNCH/VMRESUME failure properly) Cc: Jim Mattson Cc: Krish Sadhukhan Cc: Paolo Bonzini Cc: Radim KrÄmář Cc: Wanpeng Li Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 173 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 153 insertions(+), 20 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 14d446366ca5..8efce8e0a468 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -13156,24 +13156,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, kvm_clear_interrupt_queue(vcpu); } -static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - u32 entry_failure_code; - - nested_ept_uninit_mmu_context(vcpu); - - /* - * Only PDPTE load can fail as the value of cr3 was checked on entry and - * couldn't have changed. 
- */ - if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) - nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); - - if (!enable_ept) - vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; -} - /* * A part of what we need to when the nested L2 guest exits and we want to * run its L1 parent, is to reset L1's guest state to the host state specified @@ -13187,6 +13169,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct kvm_segment seg; + u32 entry_failure_code; if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->host_ia32_efer; @@ -13213,7 +13196,17 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); vmx_set_cr4(vcpu, vmcs12->host_cr4); - load_vmcs12_mmu_host_state(vcpu, vmcs12); + nested_ept_uninit_mmu_context(vcpu); + + /* + * Only PDPTE load can fail as the value of cr3 was checked on entry and + * couldn't have changed. + */ + if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) + nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); + + if (!enable_ept) + vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; /* * If vmcs01 doesn't use VPID, CPU flushes TLB on every @@ -13311,6 +13304,140 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); } +static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) +{ + struct shared_msr_entry *efer_msr; + unsigned int i; + + if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) + return vmcs_read64(GUEST_IA32_EFER); + + if (cpu_has_load_ia32_efer) + return host_efer; + + for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { + if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) + return vmx->msr_autoload.guest.val[i].value; + } + + efer_msr = find_msr_entry(vmx, MSR_EFER); + if (efer_msr) + return efer_msr->data; + + return host_efer; +} + +static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmx_msr_entry g, h; + struct msr_data msr; + gpa_t gpa; + u32 i, j; + + vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); + + if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { + /* + * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set + * as vmcs01.GUEST_DR7 contains a userspace defined value + * and vcpu->arch.dr7 is not squirreled away before the + * nested VMENTER (not worth adding a variable in nested_vmx). + */ + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) + kvm_set_dr(vcpu, 7, DR7_FIXED_1); + else + WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); + } + + /* + * Note that calling vmx_set_{efer,cr0,cr4} is important as they + * handle a variety of side effects to KVM's software model. + */ + vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); + + vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; + vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); + + vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); + vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); + + nested_ept_uninit_mmu_context(vcpu); + vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); + __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); + + /* + * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs + * from vmcs01 (if necessary). The PDPTRs are not loaded on + * VMFail, like everything else we just need to ensure our + * software model is up-to-date. 
+ */ + ept_save_pdptrs(vcpu); + + kvm_mmu_reset_context(vcpu); + + if (cpu_has_vmx_msr_bitmap()) + vmx_update_msr_bitmap(vcpu); + + /* + * This nasty bit of open coding is a compromise between blindly + * loading L1's MSRs using the exit load lists (incorrect emulation + * of VMFail), leaving the nested VM's MSRs in the software model + * (incorrect behavior) and snapshotting the modified MSRs (too + * expensive since the lists are unbound by hardware). For each + * MSR that was (prematurely) loaded from the nested VMEntry load + * list, reload it from the exit load list if it exists and differs + * from the guest value. The intent is to stuff host state as + * silently as possible, not to fully process the exit load list. + */ + msr.host_initiated = false; + for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { + gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); + if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { + pr_debug_ratelimited( + "%s read MSR index failed (%u, 0x%08llx)\n", + __func__, i, gpa); + goto vmabort; + } + + for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { + gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); + if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { + pr_debug_ratelimited( + "%s read MSR failed (%u, 0x%08llx)\n", + __func__, j, gpa); + goto vmabort; + } + if (h.index != g.index) + continue; + if (h.value == g.value) + break; + + if (nested_vmx_load_msr_check(vcpu, &h)) { + pr_debug_ratelimited( + "%s check failed (%u, 0x%x, 0x%x)\n", + __func__, j, h.index, h.reserved); + goto vmabort; + } + + msr.index = h.index; + msr.data = h.value; + if (kvm_set_msr(vcpu, &msr)) { + pr_debug_ratelimited( + "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", + __func__, j, h.index, h.value); + goto vmabort; + } + } + } + + return; + +vmabort: + nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); +} + /* * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 * and modify vmcs12 to make it see what it would expect to see there if @@ -13455,7 +13582,13 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, */ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); - load_vmcs12_mmu_host_state(vcpu, vmcs12); + /* + * Restore L1's host state to KVM's software model. We're here + * because a consistency check was caught by hardware, which + * means some amount of guest state has been propagated to KVM's + * model and needs to be unwound to the host's state. + */ + nested_vmx_restore_host_state(vcpu); /* * The emulated instruction was already skipped in From 3ff519f29d98ecdc1961d825d105d68711093b6b Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Thu, 6 Sep 2018 05:58:16 +0800 Subject: [PATCH 116/204] KVM: x86: adjust kvm_mmu_page member to save 8 bytes On a 64bits machine, struct is naturally aligned with 8 bytes. Since kvm_mmu_page member *unsync* and *role* are less then 4 bytes, we can rearrange the sequence to compace the struct. As the comment shows, *role* and *gfn* are used to key the shadow page. In order to keep the comment valid, this patch moves the *unsync* up and exchange the position of *role* and *gfn*. From /proc/slabinfo, it shows the size of kvm_mmu_page is 8 bytes less and with one more object per slap after applying this patch. 
# name kvm_mmu_page_header 0 0 168 24 kvm_mmu_page_header 0 0 160 25 Signed-off-by: Wei Yang Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 1c09a0d1771f..576ff47a79c4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -281,18 +281,18 @@ struct kvm_rmap_head { struct kvm_mmu_page { struct list_head link; struct hlist_node hash_link; + bool unsync; /* * The following two entries are used to key the shadow page in the * hash table. */ - gfn_t gfn; union kvm_mmu_page_role role; + gfn_t gfn; u64 *spt; /* hold the gfn of each spte inside spt */ gfn_t *gfns; - bool unsync; int root_count; /* Currently serving as active root */ unsigned int unsync_children; struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */ From 55c1dcd80bdf0e9cf30a963fffd3131145f26938 Mon Sep 17 00:00:00 2001 From: Krish Sadhukhan Date: Thu, 27 Sep 2018 14:33:27 -0400 Subject: [PATCH 117/204] nVMX x86: Make nested_vmx_check_pml_controls() concise Suggested-by: Jim Mattson Signed-off-by: Krish Sadhukhan Reviewed-by: Mark Kanda Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 8efce8e0a468..77ac8f5a9671 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -11779,15 +11779,12 @@ static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu, static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { - u64 address = vmcs12->pml_address; - int maxphyaddr = cpuid_maxphyaddr(vcpu); + if (!nested_cpu_has_pml(vmcs12)) + return 0; - if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML)) { - if (!nested_cpu_has_ept(vmcs12) || - !IS_ALIGNED(address, 4096) || - address >> maxphyaddr) - return -EINVAL; - } + if (!nested_cpu_has_ept(vmcs12) || + !page_address_valid(vcpu, vmcs12->pml_address)) + return -EINVAL; return 0; } From daefb7949e661e2ee5e40dc5ca51e5bf42f6c823 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Tue, 4 Sep 2018 23:57:32 +0800 Subject: [PATCH 118/204] KVM: x86: return 0 in case kvm_mmu_memory_cache has min number of objects The code tries to pre-allocate *min* number of objects, so it is ok to return 0 when the kvm_mmu_memory_cache meets the requirement. Signed-off-by: Wei Yang Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 51b953ad9d4e..b383e8432649 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -932,7 +932,7 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, while (cache->nobjs < ARRAY_SIZE(cache->objects)) { obj = kmem_cache_zalloc(base_cache, GFP_KERNEL); if (!obj) - return -ENOMEM; + return cache->nobjs >= min ? 0 : -ENOMEM; cache->objects[cache->nobjs++] = obj; } return 0; @@ -960,7 +960,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, while (cache->nobjs < ARRAY_SIZE(cache->objects)) { page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT); if (!page) - return -ENOMEM; + return cache->nobjs >= min ? 
0 : -ENOMEM; cache->objects[cache->nobjs++] = page; } return 0; From aaa45da24e5d8f3906a6500a3e0da64877d69f04 Mon Sep 17 00:00:00 2001 From: Tianyu Lan Date: Fri, 28 Sep 2018 12:45:16 +0000 Subject: [PATCH 119/204] KVM/VMX: Remve unused function is_external_interrupt(). is_external_interrupt() is not used now and so remove it. Signed-off-by: Lan Tianyu Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 77ac8f5a9671..eee1b60023fd 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1628,12 +1628,6 @@ static inline bool is_gp_fault(u32 intr_info) return is_exception_n(intr_info, GP_VECTOR); } -static inline bool is_external_interrupt(u32 intr_info) -{ - return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) - == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); -} - static inline bool is_machine_check(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | From 4fef0f491347785dfd4cca206b80f75c4ec2dc9f Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Fri, 28 Sep 2018 22:08:50 +0800 Subject: [PATCH 120/204] KVM: x86: move definition PT_MAX_HUGEPAGE_LEVEL and KVM_NR_PAGE_SIZES together Currently, there are two definitions related to huge page, but a little bit far from each other and seems loosely connected: * KVM_NR_PAGE_SIZES defines the number of different size a page could map * PT_MAX_HUGEPAGE_LEVEL means the maximum level of huge page The number of different size a page could map equals the maximum level of huge page, which is implied by current definition. While current implementation may not be kind to readers and further developers: * KVM_NR_PAGE_SIZES looks like a stand alone definition at first sight * in case we need to support more level, two places need to change This patch tries to make these two definition more close, so that reader and developer would feel more comfortable to manipulate. 
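With the three levels defined below, the derived value is unchanged,
it is just computed by construction now:

        KVM_NR_PAGE_SIZES = PT_MAX_HUGEPAGE_LEVEL - PT_PAGE_TABLE_LEVEL + 1
                          = PT_PDPE_LEVEL - 1 + 1
                          = 3   /* 4K, 2M and 1G pages on x86 */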
Signed-off-by: Wei Yang Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 10 +++++++++- arch/x86/kvm/mmu.h | 5 ----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 576ff47a79c4..ed4d7848ebf2 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -102,7 +102,15 @@ #define UNMAPPED_GVA (~(gpa_t)0) /* KVM Hugepage definitions for x86 */ -#define KVM_NR_PAGE_SIZES 3 +enum { + PT_PAGE_TABLE_LEVEL = 1, + PT_DIRECTORY_LEVEL = 2, + PT_PDPE_LEVEL = 3, + /* set max level to the biggest one */ + PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL, +}; +#define KVM_NR_PAGE_SIZES (PT_MAX_HUGEPAGE_LEVEL - \ + PT_PAGE_TABLE_LEVEL + 1) #define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9) #define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x)) #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x)) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 1fab69c0b2f3..99d549905311 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -43,11 +43,6 @@ #define PT32_ROOT_LEVEL 2 #define PT32E_ROOT_LEVEL 3 -#define PT_PDPE_LEVEL 3 -#define PT_DIRECTORY_LEVEL 2 -#define PT_PAGE_TABLE_LEVEL 1 -#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1) - static inline u64 rsvd_bits(int s, int e) { if (e < s) From 0624fca9512d08cbd9b5d098d904c840d3432404 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 1 Oct 2018 16:07:18 +0200 Subject: [PATCH 121/204] kvm/x86: return meaningful value from KVM_SIGNAL_MSI If kvm_apic_map_get_dest_lapic() finds a disabled LAPIC, it will return with bitmap==0 and (*r == -1) will be returned to userspace. QEMU may then record "KVM: injection failed, MSI lost (Operation not permitted)" in its log, which is quite puzzling. Reported-by: Peng Hao Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 91ffb63397f5..452eed992aea 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -960,14 +960,14 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, map = rcu_dereference(kvm->arch.apic_map); ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap); - if (ret) + if (ret) { + *r = 0; for_each_set_bit(i, &bitmap, 16) { if (!dst[i]) continue; - if (*r < 0) - *r = 0; *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map); } + } rcu_read_unlock(); return ret; From 9170200ec0ebad70e5b9902bc93e2b1b11456a3b Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 22 Aug 2018 12:18:28 +0200 Subject: [PATCH 122/204] KVM: x86: hyperv: enforce vp_index < KVM_MAX_VCPUS Hyper-V TLFS (5.0b) states: > Virtual processors are identified by using an index (VP index). The > maximum number of virtual processors per partition supported by the > current implementation of the hypervisor can be obtained through CPUID > leaf 0x40000005. A virtual processor index must be less than the > maximum number of virtual processors per partition. Forbid userspace to set VP_INDEX above KVM_MAX_VCPUS. get_vcpu_by_vpidx() can now be optimized to bail early when supplied vpidx is >= KVM_MAX_VCPUS. 
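The early bail in get_vcpu_by_vpidx() then becomes (see the hunk below):

        if (vpidx >= KVM_MAX_VCPUS)
                return NULL;    /* userspace can no longer have assigned this index */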
Signed-off-by: Vitaly Kuznetsov Reviewed-by: Roman Kagan Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 01d209ab5481..0cd597b0f754 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -132,8 +132,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx) struct kvm_vcpu *vcpu = NULL; int i; - if (vpidx < KVM_MAX_VCPUS) - vcpu = kvm_get_vcpu(kvm, vpidx); + if (vpidx >= KVM_MAX_VCPUS) + return NULL; + + vcpu = kvm_get_vcpu(kvm, vpidx); if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx) return vcpu; kvm_for_each_vcpu(i, vcpu, kvm) @@ -1044,7 +1046,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) switch (msr) { case HV_X64_MSR_VP_INDEX: - if (!host) + if (!host || (u32)data >= KVM_MAX_VCPUS) return 1; hv->vp_index = (u32)data; break; From a812297c4fd9c2c9337b451ad8d66083c5b24ceb Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 22 Aug 2018 12:18:29 +0200 Subject: [PATCH 123/204] KVM: x86: hyperv: optimize 'all cpus' case in kvm_hv_flush_tlb() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can use 'NULL' to represent 'all cpus' case in kvm_make_vcpus_request_mask() and avoid building vCPU mask with all vCPUs. Suggested-by: Radim Krčmář Signed-off-by: Vitaly Kuznetsov Reviewed-by: Roman Kagan Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 42 +++++++++++++++++++++++------------------- virt/kvm/kvm_main.c | 6 ++---- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 0cd597b0f754..b45ce136be2f 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1325,35 +1325,39 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, cpumask_clear(&hv_current->tlb_lush); + if (all_cpus) { + kvm_make_vcpus_request_mask(kvm, + KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP, + NULL, &hv_current->tlb_lush); + goto ret_success; + } + kvm_for_each_vcpu(i, vcpu, kvm) { struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; int bank = hv->vp_index / 64, sbank = 0; - if (!all_cpus) { - /* Banks >64 can't be represented */ - if (bank >= 64) - continue; + /* Banks >64 can't be represented */ + if (bank >= 64) + continue; - /* Non-ex hypercalls can only address first 64 vCPUs */ - if (!ex && bank) - continue; + /* Non-ex hypercalls can only address first 64 vCPUs */ + if (!ex && bank) + continue; - if (ex) { - /* - * Check is the bank of this vCPU is in sparse - * set and get the sparse bank number. - */ - sbank = get_sparse_bank_no(valid_bank_mask, - bank); + if (ex) { + /* + * Check is the bank of this vCPU is in sparse + * set and get the sparse bank number. 
+ */ + sbank = get_sparse_bank_no(valid_bank_mask, bank); - if (sbank < 0) - continue; - } - - if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64))) + if (sbank < 0) continue; } + if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64))) + continue; + /* * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we * can't analyze it here, flush TLB regardless of the specified diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index f986e31fa68c..587e1a0a8715 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -219,7 +219,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, me = get_cpu(); kvm_for_each_vcpu(i, vcpu, kvm) { - if (!test_bit(i, vcpu_bitmap)) + if (vcpu_bitmap && !test_bit(i, vcpu_bitmap)) continue; kvm_make_request(req, vcpu); @@ -243,12 +243,10 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) { cpumask_var_t cpus; bool called; - static unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] - = {[0 ... BITS_TO_LONGS(KVM_MAX_VCPUS)-1] = ULONG_MAX}; zalloc_cpumask_var(&cpus, GFP_ATOMIC); - called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap, cpus); + called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus); free_cpumask_var(cpus); return called; From 1779a39f786397760ae7a7cc03cf37697d8ae58d Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 26 Sep 2018 19:02:55 +0200 Subject: [PATCH 124/204] KVM: x86: hyperv: consistently use 'hv_vcpu' for 'struct kvm_vcpu_hv' variables Rename 'hv' to 'hv_vcpu' in kvm_hv_set_msr/kvm_hv_get_msr(); 'hv' is 'reserved' for 'struct kvm_hv' variables across the file. Signed-off-by: Vitaly Kuznetsov Reviewed-by: Roman Kagan Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index b45ce136be2f..c8764faf783b 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1042,20 +1042,20 @@ static u64 current_task_runtime_100ns(void) static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) { - struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; switch (msr) { case HV_X64_MSR_VP_INDEX: if (!host || (u32)data >= KVM_MAX_VCPUS) return 1; - hv->vp_index = (u32)data; + hv_vcpu->vp_index = (u32)data; break; case HV_X64_MSR_VP_ASSIST_PAGE: { u64 gfn; unsigned long addr; if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) { - hv->hv_vapic = data; + hv_vcpu->hv_vapic = data; if (kvm_lapic_enable_pv_eoi(vcpu, 0)) return 1; break; @@ -1066,7 +1066,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) return 1; if (__clear_user((void __user *)addr, PAGE_SIZE)) return 1; - hv->hv_vapic = data; + hv_vcpu->hv_vapic = data; kvm_vcpu_mark_page_dirty(vcpu, gfn); if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) @@ -1082,7 +1082,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) case HV_X64_MSR_VP_RUNTIME: if (!host) return 1; - hv->runtime_offset = data - current_task_runtime_100ns(); + hv_vcpu->runtime_offset = data - current_task_runtime_100ns(); break; case HV_X64_MSR_SCONTROL: case HV_X64_MSR_SVERSION: @@ -1174,11 +1174,11 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) { u64 data = 0; - struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; switch (msr) { case HV_X64_MSR_VP_INDEX: - data = hv->vp_index; + data = hv_vcpu->vp_index; break; 
case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); @@ -1187,10 +1187,10 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); case HV_X64_MSR_VP_ASSIST_PAGE: - data = hv->hv_vapic; + data = hv_vcpu->hv_vapic; break; case HV_X64_MSR_VP_RUNTIME: - data = current_task_runtime_100ns() + hv->runtime_offset; + data = current_task_runtime_100ns() + hv_vcpu->runtime_offset; break; case HV_X64_MSR_SCONTROL: case HV_X64_MSR_SVERSION: From 87ee613d076351950b74383215437f841ebbeb75 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 26 Sep 2018 19:02:56 +0200 Subject: [PATCH 125/204] KVM: x86: hyperv: keep track of mismatched VP indexes In most common cases VP index of a vcpu matches its vcpu index. Userspace is, however, free to set any mapping it wishes and we need to account for that when we need to find a vCPU with a particular VP index. To keep search algorithms optimal in both cases introduce 'num_mismatched_vp_indexes' counter showing how many vCPUs with mismatching VP index we have. In case the counter is zero we can assume vp_index == vcpu_idx. Signed-off-by: Vitaly Kuznetsov Reviewed-by: Roman Kagan Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 3 +++ arch/x86/kvm/hyperv.c | 26 +++++++++++++++++++++++--- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index ed4d7848ebf2..d81f536d024a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -790,6 +790,9 @@ struct kvm_hv { u64 hv_reenlightenment_control; u64 hv_tsc_emulation_control; u64 hv_tsc_emulation_status; + + /* How many vCPUs have VP index != vCPU index */ + atomic_t num_mismatched_vp_indexes; }; enum kvm_irqchip_mode { diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index c8764faf783b..f94dedd7ae6d 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1045,11 +1045,31 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; switch (msr) { - case HV_X64_MSR_VP_INDEX: - if (!host || (u32)data >= KVM_MAX_VCPUS) + case HV_X64_MSR_VP_INDEX: { + struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + int vcpu_idx = kvm_vcpu_get_idx(vcpu); + u32 new_vp_index = (u32)data; + + if (!host || new_vp_index >= KVM_MAX_VCPUS) return 1; - hv_vcpu->vp_index = (u32)data; + + if (new_vp_index == hv_vcpu->vp_index) + return 0; + + /* + * The VP index is initialized to vcpu_index by + * kvm_hv_vcpu_postcreate so they initially match. Now the + * VP index is changing, adjust num_mismatched_vp_indexes if + * it now matches or no longer matches vcpu_idx. + */ + if (hv_vcpu->vp_index == vcpu_idx) + atomic_inc(&hv->num_mismatched_vp_indexes); + else if (new_vp_index == vcpu_idx) + atomic_dec(&hv->num_mismatched_vp_indexes); + + hv_vcpu->vp_index = new_vp_index; break; + } case HV_X64_MSR_VP_ASSIST_PAGE: { u64 gfn; unsigned long addr; From 0b0a31badb2d98967175c6812ac81db20f9a67fc Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 26 Sep 2018 19:02:57 +0200 Subject: [PATCH 126/204] KVM: x86: hyperv: valid_bank_mask should be 'u64' This probably doesn't matter much (KVM_MAX_VCPUS is much lower nowadays) but valid_bank_mask is really u64 and not unsigned long. 
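That is, declare the mask as u64 and add a cast where the bitmap
helpers, which operate on unsigned long words, are called (as in the
hunk below):

        u64 valid_bank_mask = 0;
        ...
        sparse_banks_len =
                bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
                sizeof(sparse_banks[0]);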
Signed-off-by: Vitaly Kuznetsov Reviewed-by: Roman Kagan Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index f94dedd7ae6d..a2af1c9489fd 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1300,7 +1300,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, struct hv_tlb_flush flush; struct kvm_vcpu *vcpu; unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0}; - unsigned long valid_bank_mask = 0; + u64 valid_bank_mask = 0; u64 sparse_banks[64]; int sparse_banks_len, i; bool all_cpus; @@ -1328,7 +1328,8 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, all_cpus = flush_ex.hv_vp_set.format != HV_GENERIC_SET_SPARSE_4K; - sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) * + sparse_banks_len = + bitmap_weight((unsigned long *)&valid_bank_mask, 64) * sizeof(sparse_banks[0]); if (!sparse_banks_len && !all_cpus) From 2cefc5feb80cf4237890a6a4582a0d20e50d9ced Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 26 Sep 2018 19:02:58 +0200 Subject: [PATCH 127/204] KVM: x86: hyperv: optimize kvm_hv_flush_tlb() for vp_index == vcpu_idx case VP inedx almost always matches VCPU and when it does it's faster to walk the sparse set instead of all vcpus. Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 98 +++++++++++++++++++++++-------------------- 1 file changed, 53 insertions(+), 45 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index a2af1c9489fd..cb69ca2223fa 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1277,32 +1277,37 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) return kvm_hv_get_msr(vcpu, msr, pdata, host); } -static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no) +static __always_inline bool hv_vcpu_in_sparse_set(struct kvm_vcpu_hv *hv_vcpu, + u64 sparse_banks[], + u64 valid_bank_mask) { - int i = 0, j; + int bank = hv_vcpu->vp_index / 64, sbank; - if (!(valid_bank_mask & BIT_ULL(bank_no))) - return -1; + if (bank >= 64) + return false; - for (j = 0; j < bank_no; j++) - if (valid_bank_mask & BIT_ULL(j)) - i++; + if (!(valid_bank_mask & BIT_ULL(bank))) + return false; - return i; + /* Sparse bank number equals to the number of set bits before it */ + sbank = bitmap_weight((unsigned long *)&valid_bank_mask, bank); + + return !!(sparse_banks[sbank] & BIT_ULL(hv_vcpu->vp_index % 64)); } static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, u16 rep_cnt, bool ex) { struct kvm *kvm = current_vcpu->kvm; - struct kvm_vcpu_hv *hv_current = ¤t_vcpu->arch.hyperv; + struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = ¤t_vcpu->arch.hyperv; struct hv_tlb_flush_ex flush_ex; struct hv_tlb_flush flush; struct kvm_vcpu *vcpu; unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0}; - u64 valid_bank_mask = 0; + u64 valid_bank_mask; u64 sparse_banks[64]; - int sparse_banks_len, i; + int sparse_banks_len, i, bank, sbank; bool all_cpus; if (!ex) { @@ -1312,6 +1317,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, trace_kvm_hv_flush_tlb(flush.processor_mask, flush.address_space, flush.flags); + valid_bank_mask = BIT_ULL(0); sparse_banks[0] = flush.processor_mask; all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS; } else { @@ -1344,52 +1350,54 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, 
return HV_STATUS_INVALID_HYPERCALL_INPUT; } - cpumask_clear(&hv_current->tlb_lush); + /* + * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't + * analyze it here, flush TLB regardless of the specified address space. + */ + cpumask_clear(&hv_vcpu->tlb_lush); if (all_cpus) { kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP, - NULL, &hv_current->tlb_lush); + NULL, &hv_vcpu->tlb_lush); goto ret_success; } - kvm_for_each_vcpu(i, vcpu, kvm) { - struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; - int bank = hv->vp_index / 64, sbank = 0; - - /* Banks >64 can't be represented */ - if (bank >= 64) - continue; - - /* Non-ex hypercalls can only address first 64 vCPUs */ - if (!ex && bank) - continue; - - if (ex) { - /* - * Check is the bank of this vCPU is in sparse - * set and get the sparse bank number. - */ - sbank = get_sparse_bank_no(valid_bank_mask, bank); - - if (sbank < 0) - continue; + if (atomic_read(&hv->num_mismatched_vp_indexes)) { + kvm_for_each_vcpu(i, vcpu, kvm) { + if (hv_vcpu_in_sparse_set(&vcpu->arch.hyperv, + sparse_banks, + valid_bank_mask)) + __set_bit(i, vcpu_bitmap); } - - if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64))) - continue; - - /* - * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we - * can't analyze it here, flush TLB regardless of the specified - * address space. - */ - __set_bit(i, vcpu_bitmap); + goto flush_request; } + /* + * num_mismatched_vp_indexes is zero so every vcpu has + * vp_index == vcpu_idx. + */ + sbank = 0; + for_each_set_bit(bank, (unsigned long *)&valid_bank_mask, + BITS_PER_LONG) { + for_each_set_bit(i, + (unsigned long *)&sparse_banks[sbank], + BITS_PER_LONG) { + u32 vp_index = bank * 64 + i; + + /* A non-existent vCPU was specified */ + if (vp_index >= KVM_MAX_VCPUS) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + + __set_bit(vp_index, vcpu_bitmap); + } + sbank++; + } + +flush_request: kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP, - vcpu_bitmap, &hv_current->tlb_lush); + vcpu_bitmap, &hv_vcpu->tlb_lush); ret_success: /* We always do full TLB flush, set rep_done = rep_cnt. */ From 214ff83d4473a7757fa18a64dc7efe3b0e158486 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 26 Sep 2018 19:02:59 +0200 Subject: [PATCH 128/204] KVM: x86: hyperv: implement PV IPI send hypercalls Using hypercall for sending IPIs is faster because this allows to specify any number of vCPUs (even > 64 with sparse CPU set), the whole procedure will take only one VMEXIT. Current Hyper-V TLFS (v5.0b) claims that HvCallSendSyntheticClusterIpi hypercall can't be 'fast' (passing parameters through registers) but apparently this is not true, Windows always uses it as 'fast' so we need to support that. Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- Documentation/virtual/kvm/api.txt | 7 ++ arch/x86/kvm/hyperv.c | 115 ++++++++++++++++++++++++++++++ arch/x86/kvm/trace.h | 42 +++++++++++ arch/x86/kvm/x86.c | 1 + include/uapi/linux/kvm.h | 1 + 5 files changed, 166 insertions(+) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index df98b6304769..48e5d1197295 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -4791,3 +4791,10 @@ CPU when the exception is taken. If this virtual SError is taken to EL1 using AArch64, this value will be reported in the ISS field of ESR_ELx. See KVM_CAP_VCPU_EVENTS for more details. 
+8.20 KVM_CAP_HYPERV_SEND_IPI + +Architectures: x86 + +This capability indicates that KVM supports paravirtualized Hyper-V IPI send +hypercalls: +HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx. diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index cb69ca2223fa..bad4bffdc8b9 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1405,6 +1405,107 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET); } +static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa, + bool ex, bool fast) +{ + struct kvm *kvm = current_vcpu->kvm; + struct kvm_hv *hv = &kvm->arch.hyperv; + struct hv_send_ipi_ex send_ipi_ex; + struct hv_send_ipi send_ipi; + struct kvm_vcpu *vcpu; + unsigned long valid_bank_mask; + u64 sparse_banks[64]; + int sparse_banks_len, bank, i, sbank; + struct kvm_lapic_irq irq = {.delivery_mode = APIC_DM_FIXED}; + bool all_cpus; + + if (!ex) { + if (!fast) { + if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi, + sizeof(send_ipi)))) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + sparse_banks[0] = send_ipi.cpu_mask; + irq.vector = send_ipi.vector; + } else { + /* 'reserved' part of hv_send_ipi should be 0 */ + if (unlikely(ingpa >> 32 != 0)) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + sparse_banks[0] = outgpa; + irq.vector = (u32)ingpa; + } + all_cpus = false; + valid_bank_mask = BIT_ULL(0); + + trace_kvm_hv_send_ipi(irq.vector, sparse_banks[0]); + } else { + if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex, + sizeof(send_ipi_ex)))) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + + trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector, + send_ipi_ex.vp_set.format, + send_ipi_ex.vp_set.valid_bank_mask); + + irq.vector = send_ipi_ex.vector; + valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask; + sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) * + sizeof(sparse_banks[0]); + + all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL; + + if (!sparse_banks_len) + goto ret_success; + + if (!all_cpus && + kvm_read_guest(kvm, + ingpa + offsetof(struct hv_send_ipi_ex, + vp_set.bank_contents), + sparse_banks, + sparse_banks_len)) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + } + + if ((irq.vector < HV_IPI_LOW_VECTOR) || + (irq.vector > HV_IPI_HIGH_VECTOR)) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + + if (all_cpus || atomic_read(&hv->num_mismatched_vp_indexes)) { + kvm_for_each_vcpu(i, vcpu, kvm) { + if (all_cpus || hv_vcpu_in_sparse_set( + &vcpu->arch.hyperv, sparse_banks, + valid_bank_mask)) { + /* We fail only when APIC is disabled */ + kvm_apic_set_irq(vcpu, &irq, NULL); + } + } + goto ret_success; + } + + /* + * num_mismatched_vp_indexes is zero so every vcpu has + * vp_index == vcpu_idx. 
+ */ + sbank = 0; + for_each_set_bit(bank, (unsigned long *)&valid_bank_mask, 64) { + for_each_set_bit(i, (unsigned long *)&sparse_banks[sbank], 64) { + u32 vp_index = bank * 64 + i; + struct kvm_vcpu *vcpu = + get_vcpu_by_vpidx(kvm, vp_index); + + /* Unknown vCPU specified */ + if (!vcpu) + continue; + + /* We fail only when APIC is disabled */ + kvm_apic_set_irq(vcpu, &irq, NULL); + } + sbank++; + } + +ret_success: + return HV_STATUS_SUCCESS; +} + bool kvm_hv_hypercall_enabled(struct kvm *kvm) { return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE; @@ -1574,6 +1675,20 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) } ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true); break; + case HVCALL_SEND_IPI: + if (unlikely(rep)) { + ret = HV_STATUS_INVALID_HYPERCALL_INPUT; + break; + } + ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast); + break; + case HVCALL_SEND_IPI_EX: + if (unlikely(fast || rep)) { + ret = HV_STATUS_INVALID_HYPERCALL_INPUT; + break; + } + ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false); + break; default: ret = HV_STATUS_INVALID_HYPERCALL_CODE; break; diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 0f997683404f..0659465a745c 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -1418,6 +1418,48 @@ TRACE_EVENT(kvm_hv_flush_tlb_ex, __entry->valid_bank_mask, __entry->format, __entry->address_space, __entry->flags) ); + +/* + * Tracepoints for kvm_hv_send_ipi. + */ +TRACE_EVENT(kvm_hv_send_ipi, + TP_PROTO(u32 vector, u64 processor_mask), + TP_ARGS(vector, processor_mask), + + TP_STRUCT__entry( + __field(u32, vector) + __field(u64, processor_mask) + ), + + TP_fast_assign( + __entry->vector = vector; + __entry->processor_mask = processor_mask; + ), + + TP_printk("vector %x processor_mask 0x%llx", + __entry->vector, __entry->processor_mask) +); + +TRACE_EVENT(kvm_hv_send_ipi_ex, + TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask), + TP_ARGS(vector, format, valid_bank_mask), + + TP_STRUCT__entry( + __field(u32, vector) + __field(u64, format) + __field(u64, valid_bank_mask) + ), + + TP_fast_assign( + __entry->vector = vector; + __entry->format = format; + __entry->valid_bank_mask = valid_bank_mask; + ), + + TP_printk("vector %x format %llx valid_bank_mask 0x%llx", + __entry->vector, __entry->format, + __entry->valid_bank_mask) +); #endif /* _TRACE_KVM_H */ #undef TRACE_INCLUDE_PATH diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1f3f95557703..20a667da0a31 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2912,6 +2912,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_HYPERV_VP_INDEX: case KVM_CAP_HYPERV_EVENTFD: case KVM_CAP_HYPERV_TLBFLUSH: + case KVM_CAP_HYPERV_SEND_IPI: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 7f2ff3a76995..7785678caedb 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -955,6 +955,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_INJECT_SERROR_ESR 158 #define KVM_CAP_MSR_PLATFORM_INFO 159 #define KVM_CAP_PPC_NESTED_HV 160 +#define KVM_CAP_HYPERV_SEND_IPI 161 #ifdef KVM_CAP_IRQ_ROUTING From 3c6e099fa15fdb6fb1892199ed8709012e1294f2 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Thu, 13 Sep 2018 11:54:48 -0700 Subject: [PATCH 129/204] KVM: nVMX: Always reflect #NM VM-exits to L1 When bit 3 (corresponding to CR0.TS) of the VMCS12 cr0_guest_host_mask field is clear, the VMCS12 guest_cr0 field does not necessarily 
hold the current value of the L2 CR0.TS bit, so the code that checked for L2's CR0.TS bit being set was incorrect. Moreover, I'm not sure that the CR0.TS check was adequate. (What if L2's CR0.EM was set, for instance?) Fortunately, lazy FPU has gone away, so L0 has lost all interest in intercepting #NM exceptions. See commit bd7e5b0899a4 ("KVM: x86: remove code for lazy FPU handling"). Therefore, there is no longer any question of which hypervisor gets first dibs. The #NM VM-exit should always be reflected to L1. (Note that the corresponding bit must be set in the VMCS12 exception_bitmap field for there to be an #NM VM-exit at all.) Fixes: ccf9844e5d99c ("kvm, vmx: Really fix lazy FPU on nested guest") Reported-by: Abhiroop Dabral Signed-off-by: Jim Mattson Reviewed-by: Peter Shier Tested-by: Abhiroop Dabral Reviewed-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index eee1b60023fd..b967f89963bf 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1613,11 +1613,6 @@ static inline bool is_page_fault(u32 intr_info) return is_exception_n(intr_info, PF_VECTOR); } -static inline bool is_no_device(u32 intr_info) -{ - return is_exception_n(intr_info, NM_VECTOR); -} - static inline bool is_invalid_opcode(u32 intr_info) { return is_exception_n(intr_info, UD_VECTOR); @@ -9653,9 +9648,6 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) return false; else if (is_page_fault(intr_info)) return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept; - else if (is_no_device(intr_info) && - !(vmcs12->guest_cr0 & X86_CR0_TS)) - return false; else if (is_debug(intr_info) && vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) From 31fc4f95dddc4ebc9f9596a2720662e15e5d444e Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Wed, 22 Aug 2018 21:57:11 +0800 Subject: [PATCH 130/204] KVM: leverage change to adjust slots->used_slots in update_memslots() update_memslots() is only called by __kvm_set_memory_region(), in which "change" is calculated and indicates how to adjust slots->used_slots * increase by one if it is KVM_MR_CREATE * decrease by one if it is KVM_MR_DELETE * not change for others This patch adjusts slots->used_slots in update_memslots() based on "change" value instead of re-calculate those states again. Signed-off-by: Wei Yang Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 587e1a0a8715..acc951cc2663 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -805,20 +805,25 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) * sorted array and known changed memslot position. 
*/ static void update_memslots(struct kvm_memslots *slots, - struct kvm_memory_slot *new) + struct kvm_memory_slot *new, + enum kvm_mr_change change) { int id = new->id; int i = slots->id_to_index[id]; struct kvm_memory_slot *mslots = slots->memslots; WARN_ON(mslots[i].id != id); - if (!new->npages) { - WARN_ON(!mslots[i].npages); - if (mslots[i].npages) - slots->used_slots--; - } else { - if (!mslots[i].npages) - slots->used_slots++; + switch (change) { + case KVM_MR_CREATE: + slots->used_slots++; + WARN_ON(mslots[i].npages || !new->npages); + break; + case KVM_MR_DELETE: + slots->used_slots--; + WARN_ON(new->npages || !mslots[i].npages); + break; + default: + break; } while (i < KVM_MEM_SLOTS_NUM - 1 && @@ -1054,7 +1059,7 @@ int __kvm_set_memory_region(struct kvm *kvm, memset(&new.arch, 0, sizeof(new.arch)); } - update_memslots(slots, &new); + update_memslots(slots, &new, change); old_memslots = install_new_memslots(kvm, as_id, slots); kvm_arch_commit_memory_region(kvm, mem, &old, &new, change); From 64a919f7b5d0593227d0ea7976949c1248ec36ba Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:39 -0700 Subject: [PATCH 131/204] KVM: nVMX: move host EFER consistency checks to VMFail path Invalid host state related to loading EFER on VMExit causes a VMFail(VMXERR_ENTRY_INVALID_HOST_STATE_FIELD), not a VMExit. Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b967f89963bf..a914e7ca044c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12400,6 +12400,7 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); + bool ia32e; if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) @@ -12473,6 +12474,21 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) !nested_cr3_valid(vcpu, vmcs12->host_cr3)) return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; + /* + * If the load IA32_EFER VM-exit control is 1, bits reserved in the + * IA32_EFER MSR must be 0 in the field for that register. In addition, + * the values of the LMA and LME bits in the field must each be that of + * the host address-space size VM-exit control. + */ + if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { + ia32e = (vmcs12->vm_exit_controls & + VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; + if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || + ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || + ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) + return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; + } + /* * From the Intel SDM, volume 3: * Fields relevant to VM-entry event injection must be set properly. @@ -12594,21 +12610,6 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, return 1; } - /* - * If the load IA32_EFER VM-exit control is 1, bits reserved in the - * IA32_EFER MSR must be 0 in the field for that register. In addition, - * the values of the LMA and LME bits in the field must each be that of - * the host address-space size VM-exit control. 
- */ - if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { - ia32e = (vmcs12->vm_exit_controls & - VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; - if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || - ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || - ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) - return 1; - } - if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) || (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))) From 5b8ba41dafd789a64cdfb2b5ab6b0eb71f821cfc Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:40 -0700 Subject: [PATCH 132/204] KVM: nVMX: move vmcs12 EPTP consistency check to check_vmentry_prereqs() An invalid EPTP causes a VMFail(VMXERR_ENTRY_INVALID_CONTROL_FIELD), not a VMExit. Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a914e7ca044c..3831e0968d6a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -11327,11 +11327,9 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) return get_vmcs12(vcpu)->ept_pointer; } -static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) +static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) { WARN_ON(mmu_is_nested(vcpu)); - if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu))) - return 1; kvm_init_shadow_ept_mmu(vcpu, to_vmx(vcpu)->nested.msrs.ept_caps & @@ -11343,7 +11341,6 @@ static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; - return 0; } static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) @@ -12327,15 +12324,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); } - if (nested_cpu_has_ept(vmcs12)) { - if (nested_ept_init_mmu_context(vcpu)) { - *entry_failure_code = ENTRY_FAIL_DEFAULT; - return 1; - } - } else if (nested_cpu_has2(vmcs12, - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { + if (nested_cpu_has_ept(vmcs12)) + nested_ept_init_mmu_context(vcpu); + else if (nested_cpu_has2(vmcs12, + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) vmx_flush_tlb(vcpu, true); - } /* * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those @@ -12545,6 +12538,10 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) } } + if (nested_cpu_has_ept(vmcs12) && + !valid_ept_address(vcpu, vmcs12->ept_pointer)) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + return 0; } From 1abf23fb42f55eece2eefeea1b32aca865a14099 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:41 -0700 Subject: [PATCH 133/204] KVM: nVMX: use vm_exit_controls_init() to write exit controls for vmcs02 Write VM_EXIT_CONTROLS using vm_exit_controls_init() when configuring vmcs02, otherwise vm_exit_controls_shadow will be stale. EFER in particular can be corrupted if VM_EXIT_LOAD_IA32_EFER is not updated due to an incorrect shadow optimization, which can crash L0 due to EFER not being loaded on exit. This does not occur with the current code base simply because update_transition_efer() unconditionally clears VM_EXIT_LOAD_IA32_EFER before conditionally setting it, and because a nested guest always starts with VM_EXIT_LOAD_IA32_EFER clear, i.e. we'll only ever unnecessarily clear the bit. 
That is, until someone optimizes update_transition_efer()... Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 3831e0968d6a..3c9c8ec168e4 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12262,7 +12262,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER * bits are further modified by vmx_set_efer() below. */ - vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); + vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are * emulated by vmx_set_efer(), below. From b7031fd40fcc741b0f9b0c04c8d844e445858b84 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:42 -0700 Subject: [PATCH 134/204] KVM: nVMX: reset cache/shadows when switching loaded VMCS Reset the vm_{entry,exit}_controls_shadow variables as well as the segment cache after loading a new VMCS in vmx_switch_vmcs(). The shadows/cache track VMCS data, i.e. they're stale every time we switch to a new VMCS regardless of reason. This fixes a bug where stale control shadows would be consumed after a nested VMExit due to a failed consistency check. Suggested-by: Jim Mattson Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 3c9c8ec168e4..36d602559958 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -10951,6 +10951,10 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) vmx->loaded_vmcs = vmcs; vmx_vcpu_load(vcpu, cpu); put_cpu(); + + vm_entry_controls_reset_shadow(vmx); + vm_exit_controls_reset_shadow(vmx); + vmx_segment_cache_clear(vmx); } /* @@ -12642,7 +12646,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); - vmx_segment_cache_clear(vmx); if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vcpu->arch.tsc_offset += vmcs12->tsc_offset; @@ -13471,9 +13474,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, } vmx_switch_vmcs(vcpu, &vmx->vmcs01); - vm_entry_controls_reset_shadow(vmx); - vm_exit_controls_reset_shadow(vmx); - vmx_segment_cache_clear(vmx); /* Update any VMCS fields that might have changed while L2 ran */ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); From 02343cf207022ba349a9d66562659fce87144369 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:43 -0700 Subject: [PATCH 135/204] KVM: vmx: do not unconditionally clear EFER switching Do not unconditionally call clear_atomic_switch_msr() when updating EFER. This adds up to four unnecessary VMWrites in the case where guest_efer != host_efer, e.g. if the load_on_{entry,exit} bits were already set. 
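As a rough, self-contained illustration of the accounting (a toy model, not kernel code: the helper names echo add_atomic_switch_msr()/clear_atomic_switch_msr(), but the counters and the assumed two-VMWRITE cost per autoload-list update are inventions of the sketch):

/*
 * Standalone model (not kernel code) of the idea in this patch: the MSR
 * autoload list entry for EFER only costs VMWRITEs when its state actually
 * changes, so clearing it unconditionally before (possibly) re-adding the
 * same entry just rewrites the list counts for nothing.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int vmwrites;       /* simulated VMWRITEs to the list counts */
static bool efer_in_autoload_list;  /* does the autoload list contain EFER?  */

static void model_add_atomic_switch_msr(void)
{
	if (efer_in_autoload_list)
		return;                 /* already switched on entry/exit */
	efer_in_autoload_list = true;
	vmwrites += 2;                  /* guest-load and host-load counts */
}

static void model_clear_atomic_switch_msr(void)
{
	if (!efer_in_autoload_list)
		return;
	efer_in_autoload_list = false;
	vmwrites += 2;                  /* guest-load and host-load counts */
}

/* Old flow: always clear, then re-add if EFER must be switched atomically. */
static unsigned int old_flow(bool need_switch)
{
	vmwrites = 0;
	efer_in_autoload_list = true;   /* e.g. left over from the last update */
	model_clear_atomic_switch_msr();
	if (need_switch)
		model_add_atomic_switch_msr();
	return vmwrites;
}

/* New flow: only issue the operation that is actually required. */
static unsigned int new_flow(bool need_switch)
{
	vmwrites = 0;
	efer_in_autoload_list = true;
	if (need_switch)
		model_add_atomic_switch_msr();
	else
		model_clear_atomic_switch_msr();
	return vmwrites;
}

int main(void)
{
	printf("old flow: %u redundant VMWRITEs\n", old_flow(true));  /* 4 */
	printf("new flow: %u redundant VMWRITEs\n", new_flow(true));  /* 0 */
	return 0;
}

Compiled standalone, the old flow reports four redundant writes for the already-present case while the new flow reports none, which is the saving this patch is after.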
Signed-off-by: Sean Christopherson
Reviewed-by: Jim Mattson
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/vmx.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 36d602559958..7a245f1dca87 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2790,8 +2790,6 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 	ignore_bits &= ~(u64)EFER_SCE;
 #endif

-	clear_atomic_switch_msr(vmx, MSR_EFER);
-
 	/*
 	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
 	 * On CPUs that support "load IA32_EFER", always switch EFER
@@ -2804,8 +2802,12 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 		if (guest_efer != host_efer)
 			add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer, false);
+		else
+			clear_atomic_switch_msr(vmx, MSR_EFER);
 		return false;
 	} else {
+		clear_atomic_switch_msr(vmx, MSR_EFER);
+
 		guest_efer &= ~ignore_bits;
 		guest_efer |= host_efer & ignore_bits;

From 3df5c37e55c8f9e20d0e9bf01a4e875f72a738f3 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Wed, 26 Sep 2018 09:23:44 -0700
Subject: [PATCH 136/204] KVM: nVMX: try to set EFER bits correctly when initializing controls

VM_ENTRY_IA32E_MODE and VM_{ENTRY,EXIT}_LOAD_IA32_EFER will be explicitly
set/cleared as needed by vmx_set_efer(), but attempt to get the bits set
correctly when initializing the control fields. Setting the value correctly
can avoid multiple VMWrites.

Signed-off-by: Sean Christopherson
Reviewed-by: Jim Mattson
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/vmx.c | 51 +++++++++++++++++++++++++++++++---------------
 1 file changed, 35 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7a245f1dca87..38c0db14e996 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11972,6 +11972,17 @@ static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
 	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
 }

+static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+{
+	if (vmx->nested.nested_run_pending &&
+	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
+		return vmcs12->guest_ia32_efer;
+	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
+		return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
+	else
+		return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
+}
+
 static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -12116,6 +12127,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 exec_control, vmcs12_exec_ctrl;
+	u64 guest_efer;

 	if (vmx->nested.dirty_vmcs12) {
 		prepare_vmcs02_full(vcpu, vmcs12);
@@ -12264,19 +12276,32 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);

+	/*
+	 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
+	 * are emulated by vmx_set_efer(), below, but speculate on the
+	 * related bits (if supported by the CPU) in the hope that we can
+	 * avoid VMWrites during vmx_set_efer().
+	 */
+	guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
+
 	/* L2->L1 exit controls are emulated - the hardware exit is to L0 so
 	 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
 	 * bits are further modified by vmx_set_efer() below.
*/ - vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); + exec_control = vmcs_config.vmexit_ctrl; + if (cpu_has_load_ia32_efer && guest_efer != host_efer) + exec_control |= VM_EXIT_LOAD_IA32_EFER; + vm_exit_controls_init(vmx, exec_control); - /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are - * emulated by vmx_set_efer(), below. - */ - vm_entry_controls_init(vmx, - (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER & - ~VM_ENTRY_IA32E_MODE) | - (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE)); + exec_control = (vmcs12->vm_entry_controls | vmcs_config.vmentry_ctrl) & + ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; + if (cpu_has_load_ia32_efer) { + if (guest_efer & EFER_LMA) + exec_control |= VM_ENTRY_IA32E_MODE; + if (guest_efer != host_efer) + exec_control |= VM_ENTRY_LOAD_IA32_EFER; + } + vm_entry_controls_init(vmx, exec_control); if (vmx->nested.nested_run_pending && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { @@ -12350,14 +12375,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmx_set_cr4(vcpu, vmcs12->guest_cr4); vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); - if (vmx->nested.nested_run_pending && - (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) - vcpu->arch.efer = vmcs12->guest_ia32_efer; - else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) - vcpu->arch.efer |= (EFER_LMA | EFER_LME); - else - vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); - /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ + vcpu->arch.efer = guest_efer; + /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ vmx_set_efer(vcpu, vcpu->arch.efer); /* From d63907dc7dd11d98c8ffbdaf8311987e5a508744 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:45 -0700 Subject: [PATCH 137/204] KVM: nVMX: rename enter_vmx_non_root_mode to nested_vmx_enter_non_root_mode ...to be more consistent with the nested VMX nomenclature. Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 38c0db14e996..b0f36d82ae7b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12644,7 +12644,7 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * If exit_qual is NULL, this is being called from state restore (either RSM * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. */ -static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) +static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); @@ -12813,7 +12813,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) */ vmx->nested.nested_run_pending = 1; - ret = enter_vmx_non_root_mode(vcpu, &exit_qual); + ret = nested_vmx_enter_non_root_mode(vcpu, &exit_qual); if (ret) { nested_vmx_entry_failure(vcpu, vmcs12, ret, exit_qual); vmx->nested.nested_run_pending = 0; @@ -12824,7 +12824,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) vmx->vcpu.arch.l1tf_flush_l1d = true; /* - * Must happen outside of enter_vmx_non_root_mode() as it will + * Must happen outside of nested_vmx_enter_non_root_mode() as it will * also be used as part of restoring nVMX state for * snapshot restore (migration). 
* @@ -14055,7 +14055,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase) if (vmx->nested.smm.guest_mode) { vcpu->arch.hflags &= ~HF_SMM_MASK; - ret = enter_vmx_non_root_mode(vcpu, NULL); + ret = nested_vmx_enter_non_root_mode(vcpu, NULL); vcpu->arch.hflags |= HF_SMM_MASK; if (ret) return ret; @@ -14261,7 +14261,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, return -EINVAL; vmx->nested.dirty_vmcs12 = true; - ret = enter_vmx_non_root_mode(vcpu, NULL); + ret = nested_vmx_enter_non_root_mode(vcpu, NULL); if (ret) return -EINVAL; From 7671ce21b13b9596163a29f4712cb2451a9b97dc Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:46 -0700 Subject: [PATCH 138/204] KVM: nVMX: move check_vmentry_postreqs() call to nested_vmx_enter_non_root_mode() In preparation of supporting checkpoint/restore for nested state, commit ca0bde28f2ed ("kvm: nVMX: Split VMCS checks from nested_vmx_run()") modified check_vmentry_postreqs() to only perform the guest EFER consistency checks when nested_run_pending is true. But, in the normal nested VMEntry flow, nested_run_pending is only set after check_vmentry_postreqs(), i.e. the consistency check is being skipped. Alternatively, nested_run_pending could be set prior to calling check_vmentry_postreqs() in nested_vmx_run(), but placing the consistency checks in nested_vmx_enter_non_root_mode() allows us to split prepare_vmcs02() and interleave the preparation with the consistency checks without having to change the call sites of nested_vmx_enter_non_root_mode(). In other words, the rest of the consistency check code in nested_vmx_run() will be joining the postreqs checks in future patches. Fixes: ca0bde28f2ed ("kvm: nVMX: Split VMCS checks from nested_vmx_run()") Signed-off-by: Sean Christopherson Cc: Jim Mattson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b0f36d82ae7b..9326f6ab8a36 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12658,6 +12658,9 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); + if (from_vmentry && check_vmentry_postreqs(vcpu, vmcs12, exit_qual)) + return EXIT_REASON_INVALID_STATE; + enter_guest_mode(vcpu); if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) @@ -12800,13 +12803,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) */ skip_emulated_instruction(vcpu); - ret = check_vmentry_postreqs(vcpu, vmcs12, &exit_qual); - if (ret) { - nested_vmx_entry_failure(vcpu, vmcs12, - EXIT_REASON_INVALID_STATE, exit_qual); - return 1; - } - /* * We're finally done with prerequisite checking, and can start with * the nested entry. From a633e41e736279c6d3174f52deeb9b8b5fa85e13 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:47 -0700 Subject: [PATCH 139/204] KVM: nVMX: assimilate nested_vmx_entry_failure() into nested_vmx_enter_non_root_mode() Handling all VMExits due to failed consistency checks on VMEnter in nested_vmx_enter_non_root_mode() consolidates all relevant code into a single location, and removing nested_vmx_entry_failure() eliminates a confusing function name and label. 
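As a rough sketch of the two outcomes being distinguished (toy code, not KVM's implementation; the struct, field names and constants below are invented for illustration):

/*
 * Toy sketch, not KVM code: the two distinct ways a nested VMLAUNCH/VMRESUME
 * can "fail".  A VMFail never enters L2 and is reported through the
 * VM-instruction error field (plus RFLAGS), while a failed VMEntry is a
 * VMExit variant whose exit reason carries a "failed VMEntry" flag and whose
 * handling loads the vmcs12 host state.  All names here are invented.
 */
#include <stdio.h>

#define TOY_FAILED_VMENTRY (1u << 31)	/* stands in for the failed-VMEntry flag */

struct toy_vmcs12 {
	unsigned int vm_instruction_error;	/* written on VMFail */
	unsigned int vm_exit_reason;		/* written on (failed) VMExit */
	unsigned int exit_qualification;
};

/* VMFail path: only flags and the error field change; L2 state is untouched. */
static void toy_vmfail(struct toy_vmcs12 *vmcs12, unsigned int error)
{
	vmcs12->vm_instruction_error = error;
}

/* Failed-VMEntry path: a VMExit, so vmcs12 host state would be restored. */
static void toy_vmentry_fail_vmexit(struct toy_vmcs12 *vmcs12,
				    unsigned int reason, unsigned int qual)
{
	vmcs12->vm_exit_reason = reason | TOY_FAILED_VMENTRY;
	vmcs12->exit_qualification = qual;
	/* a real implementation would also load the vmcs12 host state here */
}

int main(void)
{
	struct toy_vmcs12 vmcs12 = { 0 };

	toy_vmfail(&vmcs12, 7);			/* placeholder error number */
	printf("VMFail: error=%u\n", vmcs12.vm_instruction_error);

	toy_vmentry_fail_vmexit(&vmcs12, 33, 0);	/* placeholder exit reason */
	printf("failed VMEntry: reason=0x%x\n", vmcs12.vm_exit_reason);
	return 0;
}

The point is that only the second path behaves like a VMExit and therefore needs vmcs12 host state handling; the first never moves past the vmlaunch/vmresume instruction itself.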
For a VMEntry, "fail" and its derivatives has a very specific meaning due to the different behavior of a VMEnter VMFail versus VMExit, i.e. it wasn't obvious that nested_vmx_entry_failure() handled VMExit scenarios. Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 78 +++++++++++++++++++++------------------------- 1 file changed, 36 insertions(+), 42 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9326f6ab8a36..7906dd4899e7 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2048,9 +2048,6 @@ static inline bool is_nmi(u32 intr_info) static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification); -static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12, - u32 reason, unsigned long qualification); static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) { @@ -12640,26 +12637,29 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, return 0; } +static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12); + /* - * If exit_qual is NULL, this is being called from state restore (either RSM + * If from_vmentry is false, this is being called from state restore (either RSM * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. */ -static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) +static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, + bool from_vmentry) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - bool from_vmentry = !!exit_qual; - u32 dummy_exit_qual; bool evaluate_pending_interrupts; - int r = 0; + u32 exit_reason = EXIT_REASON_INVALID_STATE; + u32 exit_qual; evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING); if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); - if (from_vmentry && check_vmentry_postreqs(vcpu, vmcs12, exit_qual)) - return EXIT_REASON_INVALID_STATE; + if (from_vmentry && check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) + goto vmentry_fail_vmexit; enter_guest_mode(vcpu); @@ -12674,18 +12674,17 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vcpu->arch.tsc_offset += vmcs12->tsc_offset; - r = EXIT_REASON_INVALID_STATE; - if (prepare_vmcs02(vcpu, vmcs12, from_vmentry ? exit_qual : &dummy_exit_qual)) + if (prepare_vmcs02(vcpu, vmcs12, &exit_qual)) goto fail; if (from_vmentry) { nested_get_vmcs12_pages(vcpu); - r = EXIT_REASON_MSR_LOAD_FAIL; - *exit_qual = nested_vmx_load_msr(vcpu, - vmcs12->vm_entry_msr_load_addr, - vmcs12->vm_entry_msr_load_count); - if (*exit_qual) + exit_reason = EXIT_REASON_MSR_LOAD_FAIL; + exit_qual = nested_vmx_load_msr(vcpu, + vmcs12->vm_entry_msr_load_addr, + vmcs12->vm_entry_msr_load_count); + if (exit_qual) goto fail; } else { /* @@ -12723,12 +12722,28 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) */ return 0; + /* + * A failed consistency check that leads to a VMExit during L1's + * VMEnter to L2 is a variation of a normal VMexit, as explained in + * 26.7 "VM-entry failures during or after loading guest state". 
+ */ fail: if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vcpu->arch.tsc_offset -= vmcs12->tsc_offset; leave_guest_mode(vcpu); vmx_switch_vmcs(vcpu, &vmx->vmcs01); - return r; + + if (!from_vmentry) + return 1; + +vmentry_fail_vmexit: + load_vmcs12_host_state(vcpu, vmcs12); + vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY; + vmcs12->exit_qualification = exit_qual; + nested_vmx_succeed(vcpu); + if (enable_shadow_vmcs) + vmx->nested.sync_shadow_vmcs = true; + return 1; } /* @@ -12740,7 +12755,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); - u32 exit_qual; int ret; if (!nested_vmx_check_permission(vcpu)) @@ -12809,9 +12823,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) */ vmx->nested.nested_run_pending = 1; - ret = nested_vmx_enter_non_root_mode(vcpu, &exit_qual); + ret = nested_vmx_enter_non_root_mode(vcpu, true); if (ret) { - nested_vmx_entry_failure(vcpu, vmcs12, ret, exit_qual); vmx->nested.nested_run_pending = 0; return 1; } @@ -13609,25 +13622,6 @@ static void vmx_leave_nested(struct kvm_vcpu *vcpu) free_nested(to_vmx(vcpu)); } -/* - * L1's failure to enter L2 is a subset of a normal exit, as explained in - * 23.7 "VM-entry failures during or after loading guest state" (this also - * lists the acceptable exit-reason and exit-qualification parameters). - * It should only be called before L2 actually succeeded to run, and when - * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss). - */ -static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12, - u32 reason, unsigned long qualification) -{ - load_vmcs12_host_state(vcpu, vmcs12); - vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY; - vmcs12->exit_qualification = qualification; - nested_vmx_succeed(vcpu); - if (enable_shadow_vmcs) - to_vmx(vcpu)->nested.sync_shadow_vmcs = true; -} - static int vmx_check_intercept(struct kvm_vcpu *vcpu, struct x86_instruction_info *info, enum x86_intercept_stage stage) @@ -14051,7 +14045,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase) if (vmx->nested.smm.guest_mode) { vcpu->arch.hflags &= ~HF_SMM_MASK; - ret = nested_vmx_enter_non_root_mode(vcpu, NULL); + ret = nested_vmx_enter_non_root_mode(vcpu, false); vcpu->arch.hflags |= HF_SMM_MASK; if (ret) return ret; @@ -14257,7 +14251,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, return -EINVAL; vmx->nested.dirty_vmcs12 = true; - ret = nested_vmx_enter_non_root_mode(vcpu, NULL); + ret = nested_vmx_enter_non_root_mode(vcpu, false); if (ret) return -EINVAL; From 39f9c3885c70fd08ff0f8031d32d6fb1098f0118 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:48 -0700 Subject: [PATCH 140/204] KVM: vVMX: rename label for post-enter_guest_mode consistency check Rename 'fail' to 'vmentry_fail_vmexit_guest_mode' to make it more obvious that it's simply a different entry point to the VMExit path, whose purpose is unwind the updates done prior to calling prepare_vmcs02(). 
Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 7906dd4899e7..7cae4f8c4f23 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12675,7 +12675,7 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, vcpu->arch.tsc_offset += vmcs12->tsc_offset; if (prepare_vmcs02(vcpu, vmcs12, &exit_qual)) - goto fail; + goto vmentry_fail_vmexit_guest_mode; if (from_vmentry) { nested_get_vmcs12_pages(vcpu); @@ -12685,7 +12685,7 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, vmcs12->vm_entry_msr_load_addr, vmcs12->vm_entry_msr_load_count); if (exit_qual) - goto fail; + goto vmentry_fail_vmexit_guest_mode; } else { /* * The MMU is not initialized to point at the right entities yet and @@ -12727,7 +12727,7 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, * VMEnter to L2 is a variation of a normal VMexit, as explained in * 26.7 "VM-entry failures during or after loading guest state". */ -fail: +vmentry_fail_vmexit_guest_mode: if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vcpu->arch.tsc_offset -= vmcs12->tsc_offset; leave_guest_mode(vcpu); From 860ff2aa84d386ba3576ef5c6e2b6dcb7de26a05 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:49 -0700 Subject: [PATCH 141/204] KVM: VMX: remove ASSERT() on vmx->pml_pg validity vmx->pml_pg is allocated by vmx_create_vcpu() and is only nullified when the vCPU is destroyed by vmx_free_vcpu(). Remove the ASSERTs on vmx->pml_pg, there is no need to carry debug code that provides no value to the current code base. Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 7cae4f8c4f23..d578966bc6d0 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -6653,7 +6653,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); if (enable_pml) { - ASSERT(vmx->pml_pg); vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); } @@ -12347,7 +12346,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * since we always flush the log on each vmexit, this happens * to be equivalent to simply resetting the fields in vmcs02. */ - ASSERT(vmx->pml_pg); vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); } From 09abe32002665f97e61b42af0a045080663e9e7d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:50 -0700 Subject: [PATCH 142/204] KVM: nVMX: split pieces of prepare_vmcs02() to prepare_vmcs02_early() Add prepare_vmcs02_early() and move pieces of prepare_vmcs02() to the new function. prepare_vmcs02_early() writes the bits of vmcs02 that a) must be in place to pass the VMFail consistency checks (assuming vmcs12 is valid) and b) are needed recover from a VMExit, e.g. host state that is loaded on VMExit. Splitting the functionality will enable KVM to leverage hardware to do VMFail consistency checks via a dry run of VMEnter and recover from a potential VMExit without having to fully initialize vmcs02. Add prepare_vmcs02_constant_state() to handle writing vmcs02 state that comes from vmcs01 and never changes, i.e. 
we don't need to rewrite any of the vmcs02 that is effectively constant once defined. Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 500 ++++++++++++++++++++++++--------------------- 1 file changed, 269 insertions(+), 231 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d578966bc6d0..aa35ab9643ac 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -11979,112 +11979,48 @@ static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); } -static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) +static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - - vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); - vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); - vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); - vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); - vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); - vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); - vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); - vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); - vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); - vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); - vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); - vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); - vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); - vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); - vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); - vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); - vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); - vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); - vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); - vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); - vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); - vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); - vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); - vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); - vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); - vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); - vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); - vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); - vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); - vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); - vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); - - vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); - vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, - vmcs12->guest_pending_dbg_exceptions); - vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); - vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); - - if (nested_cpu_has_xsaves(vmcs12)) - vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); - vmcs_write64(VMCS_LINK_POINTER, -1ull); - - if (cpu_has_vmx_posted_intr()) - vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); - /* - * Whether page-faults are trapped is determined by a combination of - * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. - * If enable_ept, L0 doesn't care about page faults and we should - * set all of these to L1's desires. However, if !enable_ept, L0 does - * care about (at least some) page faults, and because it is not easy - * (if at all possible?) 
to merge L0 and L1's desires, we simply ask - * to exit on each and every L2 page fault. This is done by setting - * MASK=MATCH=0 and (see below) EB.PF=1. - * Note that below we don't need special code to set EB.PF beyond the - * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, - * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when - * !enable_ept, EB.PF is 1, so the "or" will always be 1. + * If we have never launched vmcs02, set the constant vmcs02 state + * according to L0's settings (vmcs12 is irrelevant here). Host + * fields that come from L0 and are not constant, e.g. HOST_CR3, + * will be set as needed prior to VMLAUNCH/VMRESUME. */ - vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, - enable_ept ? vmcs12->page_fault_error_code_mask : 0); - vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, - enable_ept ? vmcs12->page_fault_error_code_match : 0); + if (vmx->nested.vmcs02.launched) + return; /* All VMFUNCs are currently emulated through L0 vmexits. */ if (cpu_has_vmx_vmfunc()) vmcs_write64(VM_FUNCTION_CONTROL, 0); - if (cpu_has_vmx_apicv()) { - vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); - vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); - vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); - vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); - } + if (cpu_has_vmx_posted_intr()) + vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); + + if (cpu_has_vmx_msr_bitmap()) + vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); + + if (enable_pml) + vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); /* - * Set host-state according to L0's settings (vmcs12 is irrelevant here) - * Some constant fields are set here by vmx_set_constant_host_state(). - * Other fields are different per CPU, and will be set later when - * vmx_vcpu_load() is called, and when vmx_prepare_switch_to_guest() - * is called. - */ - vmx_set_constant_host_state(vmx); - - /* - * Set the MSR load/store lists to match L0's settings. + * Set the MSR load/store lists to match L0's settings. Only the + * addresses are constant (for vmcs02), the counts can change based + * on L2's behavior, e.g. switching to/from long mode. 
*/ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); - set_cr4_guest_host_mask(vmx); + vmx_set_constant_host_state(vmx); +} - if (kvm_mpx_supported()) { - if (vmx->nested.nested_run_pending && - (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) - vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); - else - vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); - } +static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx, + struct vmcs12 *vmcs12) +{ + prepare_vmcs02_constant_state(vmx); + + vmcs_write64(VMCS_LINK_POINTER, -1ull); if (enable_vpid) { if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) @@ -12092,79 +12028,28 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) else vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); } - - /* - * L1 may access the L2's PDPTR, so save them to construct vmcs12 - */ - if (enable_ept) { - vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); - vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); - vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); - vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); - } - - if (cpu_has_vmx_msr_bitmap()) - vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); } -/* - * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested - * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it - * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 - * guest in a way that will both be appropriate to L1's requests, and our - * needs. In addition to modifying the active vmcs (which is vmcs02), this - * function also has additional necessary side-effects, like setting various - * vcpu->arch fields. - * Returns 0 on success, 1 on failure. Invalid state exit qualification code - * is assigned to entry_failure_code on failure. - */ -static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, - u32 *entry_failure_code) +static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) { - struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control, vmcs12_exec_ctrl; - u64 guest_efer; + u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); - if (vmx->nested.dirty_vmcs12) { - prepare_vmcs02_full(vcpu, vmcs12); - vmx->nested.dirty_vmcs12 = false; - } + if (vmx->nested.dirty_vmcs12) + prepare_vmcs02_early_full(vmx, vmcs12); /* - * First, the fields that are shadowed. This must be kept in sync - * with vmx_shadow_fields.h. + * HOST_RSP is normally set correctly in vmx_vcpu_run() just before + * entry, but only if the current (host) sp changed from the value + * we wrote last (vmx->host_rsp). This cache is no longer relevant + * if we switch vmcs, and rather than hold a separate cache per vmcs, + * here we just force the write to happen on entry. 
*/ + vmx->host_rsp = 0; - vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); - vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); - vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); - vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); - vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); - - if (vmx->nested.nested_run_pending && - (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { - kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); - vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); - } else { - kvm_set_dr(vcpu, 7, vcpu->arch.dr7); - vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); - } - if (vmx->nested.nested_run_pending) { - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, - vmcs12->vm_entry_intr_info_field); - vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, - vmcs12->vm_entry_exception_error_code); - vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, - vmcs12->vm_entry_instruction_len); - vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, - vmcs12->guest_interruptibility_info); - vmx->loaded_vmcs->nmi_known_unmasked = - !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); - } else { - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); - } - vmx_set_rflags(vcpu, vmcs12->guest_rflags); - + /* + * PIN CONTROLS + */ exec_control = vmcs12->pin_based_vm_exec_control; /* Preemption timer setting is computed directly in vmx_vcpu_run. */ @@ -12179,13 +12064,43 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, } else { exec_control &= ~PIN_BASED_POSTED_INTR; } - vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); - vmx->nested.preemption_timer_expired = false; - if (nested_cpu_has_preemption_timer(vmcs12)) - vmx_start_preemption_timer(vcpu); + /* + * EXEC CONTROLS + */ + exec_control = vmx_exec_control(vmx); /* L0's desires */ + exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; + exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; + exec_control &= ~CPU_BASED_TPR_SHADOW; + exec_control |= vmcs12->cpu_based_vm_exec_control; + /* + * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if + * nested_get_vmcs12_pages can't fix it up, the illegal value + * will result in a VM entry failure. + */ + if (exec_control & CPU_BASED_TPR_SHADOW) { + vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); + vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); + } else { +#ifdef CONFIG_X86_64 + exec_control |= CPU_BASED_CR8_LOAD_EXITING | + CPU_BASED_CR8_STORE_EXITING; +#endif + } + + /* + * A vmexit (to either L1 hypervisor or L0 userspace) is always needed + * for I/O port accesses. + */ + exec_control &= ~CPU_BASED_USE_IO_BITMAPS; + exec_control |= CPU_BASED_UNCOND_IO_EXITING; + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); + + /* + * SECONDARY EXEC CONTROLS + */ if (cpu_has_secondary_exec_ctrls()) { exec_control = vmx->secondary_exec_control; @@ -12226,69 +12141,13 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, } /* - * HOST_RSP is normally set correctly in vmx_vcpu_run() just before - * entry, but only if the current (host) sp changed from the value - * we wrote last (vmx->host_rsp). This cache is no longer relevant - * if we switch vmcs, and rather than hold a separate cache per vmcs, - * here we just force the write to happen on entry. 
- */ - vmx->host_rsp = 0; - - exec_control = vmx_exec_control(vmx); /* L0's desires */ - exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; - exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; - exec_control &= ~CPU_BASED_TPR_SHADOW; - exec_control |= vmcs12->cpu_based_vm_exec_control; - - /* - * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if - * nested_get_vmcs12_pages can't fix it up, the illegal value - * will result in a VM entry failure. - */ - if (exec_control & CPU_BASED_TPR_SHADOW) { - vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); - vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); - } else { -#ifdef CONFIG_X86_64 - exec_control |= CPU_BASED_CR8_LOAD_EXITING | - CPU_BASED_CR8_STORE_EXITING; -#endif - } - - /* - * A vmexit (to either L1 hypervisor or L0 userspace) is always needed - * for I/O port accesses. - */ - exec_control &= ~CPU_BASED_USE_IO_BITMAPS; - exec_control |= CPU_BASED_UNCOND_IO_EXITING; - - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); - - /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the - * bitwise-or of what L1 wants to trap for L2, and what we want to - * trap. Note that CR0.TS also needs updating - we do this later. - */ - update_exception_bitmap(vcpu); - vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; - vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); - - /* + * ENTRY CONTROLS + * * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE - * are emulated by vmx_set_efer(), below, but speculate on the - * related bits (if supported by the CPU) in the hope that we can - * avoid VMWrites during vmx_set_efer(). + * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate + * on the related bits (if supported by the CPU) in the hope that + * we can avoid VMWrites during vmx_set_efer(). */ - guest_efer = nested_vmx_calc_efer(vmx, vmcs12); - - /* L2->L1 exit controls are emulated - the hardware exit is to L0 so - * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER - * bits are further modified by vmx_set_efer() below. - */ - exec_control = vmcs_config.vmexit_ctrl; - if (cpu_has_load_ia32_efer && guest_efer != host_efer) - exec_control |= VM_EXIT_LOAD_IA32_EFER; - vm_exit_controls_init(vmx, exec_control); - exec_control = (vmcs12->vm_entry_controls | vmcs_config.vmentry_ctrl) & ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; if (cpu_has_load_ia32_efer) { @@ -12299,6 +12158,194 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, } vm_entry_controls_init(vmx, exec_control); + /* + * EXIT CONTROLS + * + * L2->L1 exit controls are emulated - the hardware exit is to L0 so + * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER + * bits may be modified by vmx_set_efer() in prepare_vmcs02(). + */ + exec_control = vmcs_config.vmexit_ctrl; + if (cpu_has_load_ia32_efer && guest_efer != host_efer) + exec_control |= VM_EXIT_LOAD_IA32_EFER; + vm_exit_controls_init(vmx, exec_control); + + /* + * Conceptually we want to copy the PML address and index from + * vmcs01 here, and then back to vmcs01 on nested vmexit. But, + * since we always flush the log on each vmexit and never change + * the PML address (once set), this happens to be equivalent to + * simply resetting the index in vmcs02. 
+ */ + if (enable_pml) + vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); + + /* + * Interrupt/Exception Fields + */ + if (vmx->nested.nested_run_pending) { + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, + vmcs12->vm_entry_intr_info_field); + vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, + vmcs12->vm_entry_exception_error_code); + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, + vmcs12->vm_entry_instruction_len); + vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, + vmcs12->guest_interruptibility_info); + vmx->loaded_vmcs->nmi_known_unmasked = + !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); + } else { + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); + } +} + +static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) +{ + vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); + vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); + vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); + vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); + vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); + vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); + vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); + vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); + vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); + vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); + vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); + vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); + vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); + vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); + vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); + vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); + vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); + vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); + vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); + vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); + vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); + vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); + vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); + vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); + vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); + vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); + vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); + vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); + vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); + vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); + vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); + + vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); + vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, + vmcs12->guest_pending_dbg_exceptions); + vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); + vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); + + if (nested_cpu_has_xsaves(vmcs12)) + vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); + + /* + * Whether page-faults are trapped is determined by a combination of + * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. + * If enable_ept, L0 doesn't care about page faults and we should + * set all of these to L1's desires. However, if !enable_ept, L0 does + * care about (at least some) page faults, and because it is not easy + * (if at all possible?) to merge L0 and L1's desires, we simply ask + * to exit on each and every L2 page fault. This is done by setting + * MASK=MATCH=0 and (see below) EB.PF=1. 
+ * Note that below we don't need special code to set EB.PF beyond the + * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, + * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when + * !enable_ept, EB.PF is 1, so the "or" will always be 1. + */ + vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, + enable_ept ? vmcs12->page_fault_error_code_mask : 0); + vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, + enable_ept ? vmcs12->page_fault_error_code_match : 0); + + if (cpu_has_vmx_apicv()) { + vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); + vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); + vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); + vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); + } + + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); + + set_cr4_guest_host_mask(vmx); + + if (kvm_mpx_supported()) { + if (vmx->nested.nested_run_pending && + (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) + vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); + else + vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); + } + + /* + * L1 may access the L2's PDPTR, so save them to construct vmcs12 + */ + if (enable_ept) { + vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); + vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); + vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); + vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); + } +} + +/* + * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested + * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it + * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 + * guest in a way that will both be appropriate to L1's requests, and our + * needs. In addition to modifying the active vmcs (which is vmcs02), this + * function also has additional necessary side-effects, like setting various + * vcpu->arch fields. + * Returns 0 on success, 1 on failure. Invalid state exit qualification code + * is assigned to entry_failure_code on failure. + */ +static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, + u32 *entry_failure_code) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (vmx->nested.dirty_vmcs12) { + prepare_vmcs02_full(vmx, vmcs12); + vmx->nested.dirty_vmcs12 = false; + } + + /* + * First, the fields that are shadowed. This must be kept in sync + * with vmx_shadow_fields.h. + */ + + vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); + vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); + vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); + vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); + vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); + + if (vmx->nested.nested_run_pending && + (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { + kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); + vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); + } else { + kvm_set_dr(vcpu, 7, vcpu->arch.dr7); + vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); + } + vmx_set_rflags(vcpu, vmcs12->guest_rflags); + + vmx->nested.preemption_timer_expired = false; + if (nested_cpu_has_preemption_timer(vmcs12)) + vmx_start_preemption_timer(vcpu); + + /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the + * bitwise-or of what L1 wants to trap for L2, and what we want to + * trap. Note that CR0.TS also needs updating - we do this later. 
+ */ + update_exception_bitmap(vcpu); + vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; + vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); + if (vmx->nested.nested_run_pending && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); @@ -12339,17 +12386,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, } } - if (enable_pml) { - /* - * Conceptually we want to copy the PML address and index from - * vmcs01 here, and then back to vmcs01 on nested vmexit. But, - * since we always flush the log on each vmexit, this happens - * to be equivalent to simply resetting the fields in vmcs02. - */ - vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); - vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); - } - if (nested_cpu_has_ept(vmcs12)) nested_ept_init_mmu_context(vcpu); else if (nested_cpu_has2(vmcs12, @@ -12370,7 +12406,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmx_set_cr4(vcpu, vmcs12->guest_cr4); vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); - vcpu->arch.efer = guest_efer; + vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ vmx_set_efer(vcpu, vcpu->arch.efer); @@ -12672,6 +12708,8 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vcpu->arch.tsc_offset += vmcs12->tsc_offset; + prepare_vmcs02_early(vmx, vmcs12); + if (prepare_vmcs02(vcpu, vmcs12, &exit_qual)) goto vmentry_fail_vmexit_guest_mode; From 9d6105b2b59fbe899eb07ff03d3993ba910f8b85 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:51 -0700 Subject: [PATCH 143/204] KVM: nVMX: initialize vmcs02 constant exactly once (per VMCS) Add a dedicated flag to track if vmcs02 has been initialized, i.e. the constant state for vmcs02 has been written to the backing VMCS. The launched flag (in struct loaded_vmcs) gets cleared on logical CPU migration to mirror hardware behavior[1], i.e. using the launched flag to determine whether or not vmcs02 constant state needs to be initialized results in unnecessarily re-initializing the VMCS when migrating between logical CPUS. [1] The active VMCS needs to be VMCLEARed before it can be migrated to a different logical CPU. Hardware's VMCS cache is per-CPU and is not coherent between CPUs. VMCLEAR flushes the cache so that any dirty data is written back to memory. A side effect of VMCLEAR is that it also clears the VMCS's internal launch flag, which KVM must mirror because VMRESUME must be used to run a previously launched VMCS. Suggested-by: Jim Mattson Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index aa35ab9643ac..90e2d683a680 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -833,6 +833,13 @@ struct nested_vmx { bool sync_shadow_vmcs; bool dirty_vmcs12; + /* + * vmcs02 has been initialized, i.e. state that is constant for + * vmcs02 has been written to the backing VMCS. Initialization + * is delayed until L1 actually attempts to run a nested VM. + */ + bool vmcs02_initialized; + bool change_vmcs01_virtual_apic_mode; /* L2 must run next, and mustn't decide to exit to L1. 
*/ @@ -8278,6 +8285,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) vmx->nested.vpid02 = allocate_vpid(); + vmx->nested.vmcs02_initialized = false; vmx->nested.vmxon = true; return 0; @@ -11982,13 +11990,14 @@ static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) { /* - * If we have never launched vmcs02, set the constant vmcs02 state + * If vmcs02 hasn't been initialized, set the constant vmcs02 state * according to L0's settings (vmcs12 is irrelevant here). Host * fields that come from L0 and are not constant, e.g. HOST_CR3, * will be set as needed prior to VMLAUNCH/VMRESUME. */ - if (vmx->nested.vmcs02.launched) + if (vmx->nested.vmcs02_initialized) return; + vmx->nested.vmcs02_initialized = true; /* All VMFUNCs are currently emulated through L0 vmexits. */ if (cpu_has_vmx_vmfunc()) From 16fb9a46c54d1617dbc9b6baf0538abf9c004421 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:52 -0700 Subject: [PATCH 144/204] KVM: nVMX: do early preparation of vmcs02 before check_vmentry_postreqs() In anticipation of using vmcs02 to do early consistency checks, move the early preparation of vmcs02 prior to checking the postreqs. The downside of this approach is that we'll unnecessary load vmcs02 in the case that check_vmentry_postreqs() fails, but that is essentially our slow path anyways (not actually slow, but it's the path we don't really care about optimizing). Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 90e2d683a680..57379c88fcbd 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12701,11 +12701,6 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); - if (from_vmentry && check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) - goto vmentry_fail_vmexit; - - enter_guest_mode(vcpu); - if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); if (kvm_mpx_supported() && @@ -12714,17 +12709,23 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); + prepare_vmcs02_early(vmx, vmcs12); + + if (from_vmentry) { + nested_get_vmcs12_pages(vcpu); + + if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) + goto vmentry_fail_vmexit; + } + + enter_guest_mode(vcpu); if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vcpu->arch.tsc_offset += vmcs12->tsc_offset; - prepare_vmcs02_early(vmx, vmcs12); - if (prepare_vmcs02(vcpu, vmcs12, &exit_qual)) goto vmentry_fail_vmexit_guest_mode; if (from_vmentry) { - nested_get_vmcs12_pages(vcpu); - exit_reason = EXIT_REASON_MSR_LOAD_FAIL; exit_qual = nested_vmx_load_msr(vcpu, vmcs12->vm_entry_msr_load_addr, @@ -12776,12 +12777,13 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vcpu->arch.tsc_offset -= vmcs12->tsc_offset; leave_guest_mode(vcpu); + +vmentry_fail_vmexit: vmx_switch_vmcs(vcpu, &vmx->vmcs01); if (!from_vmentry) return 1; -vmentry_fail_vmexit: load_vmcs12_host_state(vcpu, vmcs12); vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY; 
vmcs12->exit_qualification = exit_qual; From cb61de2f4819b248dfb3f1380e046ab1276c5227 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:53 -0700 Subject: [PATCH 145/204] KVM: nVMX: do not skip VMEnter instruction that succeeds A successful VMEnter is essentially a fancy indirect branch that pulls the target RIP from the VMCS. Skipping the instruction is unnecessary (RIP will get overwritten by the VMExit handler) and is problematic because it can incorrectly suppress a #DB due to EFLAGS.TF when a VMFail is detected by hardware (happens after we skip the instruction). Now that vmx_nested_run() is not prematurely skipping the instr, use the full kvm_skip_emulated_instruction() in the VMFail path of nested_vmx_vmexit(). We also need to explicitly update the GUEST_INTERRUPTIBILITY_INFO when loading vmcs12 host state. Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 57379c88fcbd..cdc4367a554e 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12855,15 +12855,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) goto out; } - /* - * After this point, the trap flag no longer triggers a singlestep trap - * on the vm entry instructions; don't call kvm_skip_emulated_instruction. - * This is not 100% correct; for performance reasons, we delegate most - * of the checks on host state to the processor. If those fail, - * the singlestep trap is missed. - */ - skip_emulated_instruction(vcpu); - /* * We're finally done with prerequisite checking, and can start with * the nested entry. @@ -13243,6 +13234,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); + vmx_set_interrupt_shadow(vcpu, 0); + /* * Note that calling vmx_set_cr0 is important, even if cr0 hasn't * actually changed, because vmx_set_cr0 refers to efer set above. @@ -13636,10 +13629,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, * in L1 which thinks it just finished a VMLAUNCH or * VMRESUME instruction, so we need to set the failure * flag and the VM-instruction error field of the VMCS - * accordingly. + * accordingly, and skip the emulated instruction. */ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + kvm_skip_emulated_instruction(vcpu); + /* * Restore L1's host state to KVM's software model. We're here * because a consistency check was caught by hardware, which @@ -13648,12 +13643,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, */ nested_vmx_restore_host_state(vcpu); - /* - * The emulated instruction was already skipped in - * nested_vmx_run, but the updated RIP was never - * written back to the vmcs01. - */ - skip_emulated_instruction(vcpu); vmx->fail = 0; } From c37a6116d85c7eda541769a2b3ba4c6c9164002e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:54 -0700 Subject: [PATCH 146/204] KVM: nVMX: do not call nested_vmx_succeed() for consistency check VMExit EFLAGS is set to a fixed value on VMExit, calling nested_vmx_succeed() is unnecessary and wrong. 
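For context, a minimal userspace sketch (plain C, not kernel code; the macro values mirror the usual X86_EFLAGS_* definitions) of why the dropped call was redundant: load_vmcs12_host_state() loads RFLAGS with the architectural fixed value, which already has every arithmetic flag that the VMX "VMsucceed" convention clears as zero.

#include <assert.h>
#include <stdio.h>

/* The usual x86 RFLAGS bit positions. */
#define X86_EFLAGS_CF		(1UL << 0)
#define X86_EFLAGS_FIXED	(1UL << 1)	/* reserved bit 1, always set */
#define X86_EFLAGS_PF		(1UL << 2)
#define X86_EFLAGS_AF		(1UL << 4)
#define X86_EFLAGS_ZF		(1UL << 6)
#define X86_EFLAGS_SF		(1UL << 7)
#define X86_EFLAGS_OF		(1UL << 11)

#define VMX_ARITH_FLAGS	(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
			 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)

int main(void)
{
	/* load_vmcs12_host_state() loads RFLAGS with the fixed value only... */
	unsigned long rflags = X86_EFLAGS_FIXED;

	/*
	 * ...so the flags that the "VMsucceed" convention clears (CF, PF, AF,
	 * ZF, SF, OF) are already zero and an extra nested_vmx_succeed()
	 * could only ever be a no-op here.
	 */
	assert(!(rflags & VMX_ARITH_FLAGS));
	printf("RFLAGS after host-state load: %#lx\n", rflags);
	return 0;
}

Keeping only the fixed value also matches what hardware does to RFLAGS on a VM-exit.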
Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index cdc4367a554e..8374ad5a26e2 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12787,7 +12787,6 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, load_vmcs12_host_state(vcpu, vmcs12); vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY; vmcs12->exit_qualification = exit_qual; - nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) vmx->nested.sync_shadow_vmcs = true; return 1; From 09abb5e3e5e50eef72b09adaa798b7ef0ce9239b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:55 -0700 Subject: [PATCH 147/204] KVM: nVMX: call kvm_skip_emulated_instruction in nested_vmx_{fail,succeed} ... as every invocation of nested_vmx_{fail,succeed} is immediately followed by a call to kvm_skip_emulated_instruction(). This saves a bit of code and eliminates some silly paths, e.g. nested_vmx_run() ended up with a goto label purely used to call and return kvm_skip_emulated_instruction(). Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 245 +++++++++++++++++---------------------------- 1 file changed, 91 insertions(+), 154 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 8374ad5a26e2..2d120de9054e 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -8060,35 +8060,37 @@ static int handle_monitor(struct kvm_vcpu *vcpu) /* * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), - * set the success or error code of an emulated VMX instruction, as specified - * by Vol 2B, VMX Instruction Reference, "Conventions". + * set the success or error code of an emulated VMX instruction (as specified + * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated + * instruction. */ -static void nested_vmx_succeed(struct kvm_vcpu *vcpu) +static int nested_vmx_succeed(struct kvm_vcpu *vcpu) { vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)); + return kvm_skip_emulated_instruction(vcpu); } -static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu) +static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu) { vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)) | X86_EFLAGS_CF); + return kvm_skip_emulated_instruction(vcpu); } -static void nested_vmx_failValid(struct kvm_vcpu *vcpu, - u32 vm_instruction_error) +static int nested_vmx_failValid(struct kvm_vcpu *vcpu, + u32 vm_instruction_error) { - if (to_vmx(vcpu)->nested.current_vmptr == -1ull) { - /* - * failValid writes the error number to the current VMCS, which - * can't be done there isn't a current VMCS. - */ - nested_vmx_failInvalid(vcpu); - return; - } + /* + * failValid writes the error number to the current VMCS, which + * can't be done if there isn't a current VMCS. 
+ */ + if (to_vmx(vcpu)->nested.current_vmptr == -1ull) + return nested_vmx_failInvalid(vcpu); + vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_SF | X86_EFLAGS_OF)) @@ -8098,6 +8100,7 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu, * We don't need to force a shadow sync because * VM_INSTRUCTION_ERROR is not shadowed */ + return kvm_skip_emulated_instruction(vcpu); } static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator) @@ -8339,10 +8342,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu) return 1; } - if (vmx->nested.vmxon) { - nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); - return kvm_skip_emulated_instruction(vcpu); - } + if (vmx->nested.vmxon) + return nested_vmx_failValid(vcpu, + VMXERR_VMXON_IN_VMX_ROOT_OPERATION); if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) != VMXON_NEEDED_FEATURES) { @@ -8361,21 +8363,17 @@ static int handle_vmon(struct kvm_vcpu *vcpu) * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; * which replaces physical address width with 32 */ - if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { - nested_vmx_failInvalid(vcpu); - return kvm_skip_emulated_instruction(vcpu); - } + if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) + return nested_vmx_failInvalid(vcpu); page = kvm_vcpu_gpa_to_page(vcpu, vmptr); - if (is_error_page(page)) { - nested_vmx_failInvalid(vcpu); - return kvm_skip_emulated_instruction(vcpu); - } + if (is_error_page(page)) + return nested_vmx_failInvalid(vcpu); + if (*(u32 *)kmap(page) != VMCS12_REVISION) { kunmap(page); kvm_release_page_clean(page); - nested_vmx_failInvalid(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_failInvalid(vcpu); } kunmap(page); kvm_release_page_clean(page); @@ -8385,8 +8383,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu) if (ret) return ret; - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } /* @@ -8486,8 +8483,7 @@ static int handle_vmoff(struct kvm_vcpu *vcpu) if (!nested_vmx_check_permission(vcpu)) return 1; free_nested(to_vmx(vcpu)); - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } /* Emulate the VMCLEAR instruction */ @@ -8503,15 +8499,13 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) if (nested_vmx_get_vmptr(vcpu, &vmptr)) return 1; - if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { - nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); - return kvm_skip_emulated_instruction(vcpu); - } + if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) + return nested_vmx_failValid(vcpu, + VMXERR_VMCLEAR_INVALID_ADDRESS); - if (vmptr == vmx->nested.vmxon_ptr) { - nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); - return kvm_skip_emulated_instruction(vcpu); - } + if (vmptr == vmx->nested.vmxon_ptr) + return nested_vmx_failValid(vcpu, + VMXERR_VMCLEAR_VMXON_POINTER); if (vmptr == vmx->nested.current_vmptr) nested_release_vmcs12(vmx); @@ -8520,8 +8514,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) vmptr + offsetof(struct vmcs12, launch_state), &zero, sizeof(zero)); - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); @@ -8677,20 +8670,6 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) vmcs_load(vmx->loaded_vmcs->vmcs); } -/* - * VMX instructions which 
assume a current vmcs12 (i.e., that VMPTRLD was - * used before) all generate the same failure when it is missing. - */ -static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - if (vmx->nested.current_vmptr == -1ull) { - nested_vmx_failInvalid(vcpu); - return 0; - } - return 1; -} - static int handle_vmread(struct kvm_vcpu *vcpu) { unsigned long field; @@ -8703,8 +8682,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu) if (!nested_vmx_check_permission(vcpu)) return 1; - if (!nested_vmx_check_vmcs12(vcpu)) - return kvm_skip_emulated_instruction(vcpu); + if (to_vmx(vcpu)->nested.current_vmptr == -1ull) + return nested_vmx_failInvalid(vcpu); if (!is_guest_mode(vcpu)) vmcs12 = get_vmcs12(vcpu); @@ -8713,20 +8692,18 @@ static int handle_vmread(struct kvm_vcpu *vcpu) * When vmcs->vmcs_link_pointer is -1ull, any VMREAD * to shadowed-field sets the ALU flags for VMfailInvalid. */ - if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) { - nested_vmx_failInvalid(vcpu); - return kvm_skip_emulated_instruction(vcpu); - } + if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) + return nested_vmx_failInvalid(vcpu); vmcs12 = get_shadow_vmcs12(vcpu); } /* Decode instruction info and find the field to read */ field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); /* Read the field, zero-extended to a u64 field_value */ - if (vmcs12_read_any(vmcs12, field, &field_value) < 0) { - nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); - return kvm_skip_emulated_instruction(vcpu); - } + if (vmcs12_read_any(vmcs12, field, &field_value) < 0) + return nested_vmx_failValid(vcpu, + VMXERR_UNSUPPORTED_VMCS_COMPONENT); + /* * Now copy part of this value to register or memory, as requested. * Note that the number of bits actually copied is 32 or 64 depending @@ -8744,8 +8721,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) (is_long_mode(vcpu) ? 8 : 4), NULL); } - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } @@ -8770,8 +8746,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) if (!nested_vmx_check_permission(vcpu)) return 1; - if (!nested_vmx_check_vmcs12(vcpu)) - return kvm_skip_emulated_instruction(vcpu); + if (vmx->nested.current_vmptr == -1ull) + return nested_vmx_failInvalid(vcpu); if (vmx_instruction_info & (1u << 10)) field_value = kvm_register_readl(vcpu, @@ -8794,11 +8770,9 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) * VMCS," then the "read-only" fields are actually read/write. */ if (vmcs_field_readonly(field) && - !nested_cpu_has_vmwrite_any_field(vcpu)) { - nested_vmx_failValid(vcpu, + !nested_cpu_has_vmwrite_any_field(vcpu)) + return nested_vmx_failValid(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); - return kvm_skip_emulated_instruction(vcpu); - } if (!is_guest_mode(vcpu)) vmcs12 = get_vmcs12(vcpu); @@ -8807,18 +8781,14 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE * to shadowed-field sets the ALU flags for VMfailInvalid. 
*/ - if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) { - nested_vmx_failInvalid(vcpu); - return kvm_skip_emulated_instruction(vcpu); - } + if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) + return nested_vmx_failInvalid(vcpu); vmcs12 = get_shadow_vmcs12(vcpu); - } - if (vmcs12_write_any(vmcs12, field, field_value) < 0) { - nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); - return kvm_skip_emulated_instruction(vcpu); - } + if (vmcs12_write_any(vmcs12, field, field_value) < 0) + return nested_vmx_failValid(vcpu, + VMXERR_UNSUPPORTED_VMCS_COMPONENT); /* * Do not track vmcs12 dirty-state if in guest-mode @@ -8840,8 +8810,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) } } - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) @@ -8869,33 +8838,29 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) if (nested_vmx_get_vmptr(vcpu, &vmptr)) return 1; - if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { - nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); - return kvm_skip_emulated_instruction(vcpu); - } + if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) + return nested_vmx_failValid(vcpu, + VMXERR_VMPTRLD_INVALID_ADDRESS); - if (vmptr == vmx->nested.vmxon_ptr) { - nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); - return kvm_skip_emulated_instruction(vcpu); - } + if (vmptr == vmx->nested.vmxon_ptr) + return nested_vmx_failValid(vcpu, + VMXERR_VMPTRLD_VMXON_POINTER); if (vmx->nested.current_vmptr != vmptr) { struct vmcs12 *new_vmcs12; struct page *page; page = kvm_vcpu_gpa_to_page(vcpu, vmptr); - if (is_error_page(page)) { - nested_vmx_failInvalid(vcpu); - return kvm_skip_emulated_instruction(vcpu); - } + if (is_error_page(page)) + return nested_vmx_failInvalid(vcpu); + new_vmcs12 = kmap(page); if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || (new_vmcs12->hdr.shadow_vmcs && !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { kunmap(page); kvm_release_page_clean(page); - nested_vmx_failValid(vcpu, + return nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); - return kvm_skip_emulated_instruction(vcpu); } nested_release_vmcs12(vmx); @@ -8910,8 +8875,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) set_current_vmptr(vmx, vmptr); } - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } /* Emulate the VMPTRST instruction */ @@ -8934,8 +8898,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) kvm_inject_page_fault(vcpu, &e); return 1; } - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } /* Emulate the INVEPT instruction */ @@ -8965,11 +8928,9 @@ static int handle_invept(struct kvm_vcpu *vcpu) types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; - if (type >= 32 || !(types & (1 << type))) { - nested_vmx_failValid(vcpu, + if (type >= 32 || !(types & (1 << type))) + return nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - return kvm_skip_emulated_instruction(vcpu); - } /* According to the Intel VMX instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) @@ -8991,14 +8952,13 @@ static int handle_invept(struct kvm_vcpu *vcpu) case VMX_EPT_EXTENT_CONTEXT: kvm_mmu_sync_roots(vcpu); kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); - nested_vmx_succeed(vcpu); break; default: BUG_ON(1); break; } - return 
kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } static u16 nested_get_vpid02(struct kvm_vcpu *vcpu) @@ -9037,11 +8997,9 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) types = (vmx->nested.msrs.vpid_caps & VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; - if (type >= 32 || !(types & (1 << type))) { - nested_vmx_failValid(vcpu, + if (type >= 32 || !(types & (1 << type))) + return nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - return kvm_skip_emulated_instruction(vcpu); - } /* according to the intel vmx instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) @@ -9053,21 +9011,17 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) kvm_inject_page_fault(vcpu, &e); return 1; } - if (operand.vpid >> 16) { - nested_vmx_failValid(vcpu, + if (operand.vpid >> 16) + return nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - return kvm_skip_emulated_instruction(vcpu); - } vpid02 = nested_get_vpid02(vcpu); switch (type) { case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: if (!operand.vpid || - is_noncanonical_address(operand.gla, vcpu)) { - nested_vmx_failValid(vcpu, + is_noncanonical_address(operand.gla, vcpu)) + return nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - return kvm_skip_emulated_instruction(vcpu); - } if (cpu_has_vmx_invvpid_individual_addr()) { __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid02, operand.gla); @@ -9076,11 +9030,9 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) break; case VMX_VPID_EXTENT_SINGLE_CONTEXT: case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: - if (!operand.vpid) { - nested_vmx_failValid(vcpu, + if (!operand.vpid) + return nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - return kvm_skip_emulated_instruction(vcpu); - } __vmx_flush_tlb(vcpu, vpid02, false); break; case VMX_VPID_EXTENT_ALL_CONTEXT: @@ -9091,9 +9043,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) return kvm_skip_emulated_instruction(vcpu); } - nested_vmx_succeed(vcpu); - - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } static int handle_invpcid(struct kvm_vcpu *vcpu) @@ -12806,8 +12756,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) if (!nested_vmx_check_permission(vcpu)) return 1; - if (!nested_vmx_check_vmcs12(vcpu)) - goto out; + if (vmx->nested.current_vmptr == -1ull) + return nested_vmx_failInvalid(vcpu); vmcs12 = get_vmcs12(vcpu); @@ -12817,10 +12767,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) * rather than RFLAGS.ZF, and no error number is stored to the * VM-instruction error field. */ - if (vmcs12->hdr.shadow_vmcs) { - nested_vmx_failInvalid(vcpu); - goto out; - } + if (vmcs12->hdr.shadow_vmcs) + return nested_vmx_failInvalid(vcpu); if (enable_shadow_vmcs) copy_shadow_to_vmcs12(vmx); @@ -12835,24 +12783,18 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) * for misconfigurations which will anyway be caught by the processor * when using the merged vmcs02. */ - if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) { - nested_vmx_failValid(vcpu, - VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); - goto out; - } + if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) + return nested_vmx_failValid(vcpu, + VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); - if (vmcs12->launch_state == launch) { - nested_vmx_failValid(vcpu, + if (vmcs12->launch_state == launch) + return nested_vmx_failValid(vcpu, launch ? 
VMXERR_VMLAUNCH_NONCLEAR_VMCS : VMXERR_VMRESUME_NONLAUNCHED_VMCS); - goto out; - } ret = check_vmentry_prereqs(vcpu, vmcs12); - if (ret) { - nested_vmx_failValid(vcpu, ret); - goto out; - } + if (ret) + return nested_vmx_failValid(vcpu, ret); /* * We're finally done with prerequisite checking, and can start with @@ -12891,9 +12833,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) return kvm_vcpu_halt(vcpu); } return 1; - -out: - return kvm_skip_emulated_instruction(vcpu); } /* @@ -13622,7 +13561,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, return; } - + /* * After an early L2 VM-entry failure, we're now back * in L1 which thinks it just finished a VMLAUNCH or @@ -13630,9 +13569,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, * flag and the VM-instruction error field of the VMCS * accordingly, and skip the emulated instruction. */ - nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); - - kvm_skip_emulated_instruction(vcpu); + (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); /* * Restore L1's host state to KVM's software model. We're here From 5a5e8a15d76e6dd62e3a94fea499057bd048abbc Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:56 -0700 Subject: [PATCH 148/204] KVM: vmx: write HOST_IA32_EFER in vmx_set_constant_host_state() EFER is constant in the host and writing it once during setup means we can skip writing the host value in add_atomic_switch_msr_special(). Signed-off-by: Sean Christopherson Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2d120de9054e..5367afe92c99 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2696,7 +2696,8 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, u64 guest_val, u64 host_val) { vmcs_write64(guest_val_vmcs, guest_val); - vmcs_write64(host_val_vmcs, host_val); + if (host_val_vmcs != HOST_IA32_EFER) + vmcs_write64(host_val_vmcs, host_val); vm_entry_controls_setbit(vmx, entry); vm_exit_controls_setbit(vmx, exit); } @@ -6333,6 +6334,9 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) rdmsr(MSR_IA32_CR_PAT, low32, high32); vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); } + + if (cpu_has_load_ia32_efer) + vmcs_write64(HOST_IA32_EFER, host_efer); } static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) From 52017608da33182d0c8b397b21c95f8afab37784 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:57 -0700 Subject: [PATCH 149/204] KVM: nVMX: add option to perform early consistency checks via H/W KVM defers many VMX consistency checks to the CPU, ostensibly for performance reasons[1], including checks that result in VMFail (as opposed to VMExit). This behavior may be undesirable for some users since this means KVM detects certain classes of VMFail only after it has processed guest state, e.g. emulated MSR load-on-entry. Because there is a strict ordering between checks that cause VMFail and those that cause VMExit, i.e. all VMFail checks are performed before any checks that cause VMExit, we can detect (almost) all VMFail conditions via a dry run of sorts. The almost qualifier exists because some state in vmcs02 comes from L0, e.g. VPID, which means that hardware will never detect an invalid VPID in vmcs12 because it never sees said value. Software must (continue to) explicitly check such fields. 
After preparing vmcs02 with all state needed to pass the VMFail consistency checks, optionally do a "test" VMEnter with an invalid GUEST_RFLAGS. If the VMEnter results in a VMExit (due to bad guest state), then we can safely say that the nested VMEnter should not VMFail, i.e. any VMFail encountered in nested_vmx_vmexit() must be due to an L0 bug.

GUEST_RFLAGS is used to induce VMExit as it is unconditionally loaded on all implementations of VMX, has an invalid value that is writable on a 32-bit system and its consistency check is performed relatively early in all implementations (the exact order of consistency checks is micro-architectural).

Unfortunately, since the "passing" case causes a VMExit, KVM must be extra diligent to ensure that host state is restored, e.g. DR7 and RFLAGS are reset on VMExit. Failure to restore RFLAGS.IF is particularly fatal.

And of course the extra VMEnter and VMExit impact performance. The raw overhead of the early consistency checks is ~6% on modern hardware (though this could easily vary based on configuration), while the added latency observed from the L1 VMM is ~10%. The early consistency checks do not occur in a vacuum, e.g. spending more time in L0 can lead to more interrupts being serviced while emulating VMEnter, thereby increasing the latency observed by L1.

Add a module param, nested_early_check, to provide control over whether or not VMX performs the early consistency checks. Beyond the standard on/off behavior, an "auto" setting, whereby KVM would do the early checks only when it thinks it's running on bare metal, is a possible future extension; when running nested, doing early checks is of dubious value since the resulting behavior is heavily dependent on L0. In the future, such an "auto" setting could also be used to default to skipping the early hardware checks for certain configurations/platforms if KVM reaches a state where it has 100% coverage of VMFail conditions.

[1] To my knowledge no one has implemented and tested full software emulation of the VMFail consistency checks. Until that happens, one can only speculate about the actual performance overhead of doing all VMFail consistency checks in software. Obviously any code is slower than no code, but in the grand scheme of nested virtualization it's entirely possible the overhead is negligible.
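To make the ordering argument above concrete, here is a minimal userspace toy model of the dry-run idea (no real VMX instructions; all names such as toy_vmcs and toy_vmenter are invented for illustration). Because every VMFail-class check architecturally precedes every check that leads to a VM-exit, a probe entry whose guest state is deliberately invalid can only end two ways, and the outcome classifies the control/host fields:

#include <stdbool.h>
#include <stdio.h>

enum entry_result { VMFAIL, VMEXIT_INVALID_GUEST, VMENTER_OK };

struct toy_vmcs {
	bool controls_valid;		/* stands in for all VMFail-class checks */
	bool guest_state_valid;		/* stands in for all VMExit-class checks */
};

/* Stand-in for hardware: control/host checks run before guest-state checks. */
static enum entry_result toy_vmenter(const struct toy_vmcs *vmcs)
{
	if (!vmcs->controls_valid)
		return VMFAIL;
	if (!vmcs->guest_state_valid)
		return VMEXIT_INVALID_GUEST;
	return VMENTER_OK;
}

/* The dry run: force bad guest state ("GUEST_RFLAGS = 0") and see what happens. */
static bool early_check_says_vmfail(const struct toy_vmcs *vmcs)
{
	struct toy_vmcs probe = *vmcs;

	probe.guest_state_valid = false;
	return toy_vmenter(&probe) == VMFAIL;	/* true: report VMFail to L1 now */
}

int main(void)
{
	struct toy_vmcs good = { .controls_valid = true,  .guest_state_valid = true };
	struct toy_vmcs bad  = { .controls_valid = false, .guest_state_valid = true };

	printf("good controls -> early VMFail? %d\n", early_check_says_vmfail(&good));
	printf("bad controls  -> early VMFail? %d\n", early_check_says_vmfail(&bad));
	return 0;
}

In the patch itself the probe is nested_vmx_check_vmentry_hw(), the deliberately invalid guest state is GUEST_RFLAGS with its reserved bit 1 cleared, and a probe VMFail is surfaced to L1 as VMXERR_ENTRY_INVALID_CONTROL_FIELD.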
Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 142 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 137 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 5367afe92c99..fb4a8694ee52 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -110,6 +110,9 @@ module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); static bool __read_mostly nested = 0; module_param(nested, bool, S_IRUGO); +static bool __read_mostly nested_early_check = 0; +module_param(nested_early_check, bool, S_IRUGO); + static u64 __read_mostly host_xss; static bool __read_mostly enable_pml = 1; @@ -187,6 +190,7 @@ static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; module_param(ple_window_max, uint, 0444); extern const ulong vmx_return; +extern const ulong vmx_early_consistency_check_return; static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); @@ -11953,6 +11957,14 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) return; vmx->nested.vmcs02_initialized = true; + /* + * We don't care what the EPTP value is we just need to guarantee + * it's valid so we don't get a false positive when doing early + * consistency checks. + */ + if (enable_ept && nested_early_check) + vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0)); + /* All VMFUNCs are currently emulated through L0 vmexits. */ if (cpu_has_vmx_vmfunc()) vmcs_write64(VM_FUNCTION_CONTROL, 0); @@ -12006,7 +12018,9 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) * entry, but only if the current (host) sp changed from the value * we wrote last (vmx->host_rsp). This cache is no longer relevant * if we switch vmcs, and rather than hold a separate cache per vmcs, - * here we just force the write to happen on entry. + * here we just force the write to happen on entry. host_rsp will + * also be written unconditionally by nested_vmx_check_vmentry_hw() + * if we are doing early consistency checks via hardware. */ vmx->host_rsp = 0; @@ -12634,12 +12648,124 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, return 0; } +static int __noclone nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long cr3, cr4; + + if (!nested_early_check) + return 0; + + if (vmx->msr_autoload.host.nr) + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); + if (vmx->msr_autoload.guest.nr) + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); + + preempt_disable(); + + vmx_prepare_switch_to_guest(vcpu); + + /* + * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, + * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to + * be written (by preparve_vmcs02()) before the "real" VMEnter, i.e. + * there is no need to preserve other bits or save/restore the field. 
+ */ + vmcs_writel(GUEST_RFLAGS, 0); + + vmcs_writel(HOST_RIP, vmx_early_consistency_check_return); + + cr3 = __get_current_cr3_fast(); + if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { + vmcs_writel(HOST_CR3, cr3); + vmx->loaded_vmcs->host_state.cr3 = cr3; + } + + cr4 = cr4_read_shadow(); + if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { + vmcs_writel(HOST_CR4, cr4); + vmx->loaded_vmcs->host_state.cr4 = cr4; + } + + vmx->__launched = vmx->loaded_vmcs->launched; + + asm( + /* Set HOST_RSP */ + __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" + "mov %%" _ASM_SP ", %c[host_rsp](%0)\n\t" + + /* Check if vmlaunch of vmresume is needed */ + "cmpl $0, %c[launched](%0)\n\t" + "je 1f\n\t" + __ex(ASM_VMX_VMRESUME) "\n\t" + "jmp 2f\n\t" + "1: " __ex(ASM_VMX_VMLAUNCH) "\n\t" + "jmp 2f\n\t" + "2: " + + /* Set vmx->fail accordingly */ + "setbe %c[fail](%0)\n\t" + + ".pushsection .rodata\n\t" + ".global vmx_early_consistency_check_return\n\t" + "vmx_early_consistency_check_return: " _ASM_PTR " 2b\n\t" + ".popsection" + : + : "c"(vmx), "d"((unsigned long)HOST_RSP), + [launched]"i"(offsetof(struct vcpu_vmx, __launched)), + [fail]"i"(offsetof(struct vcpu_vmx, fail)), + [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)) + : "rax", "cc", "memory" + ); + + vmcs_writel(HOST_RIP, vmx_return); + + preempt_enable(); + + if (vmx->msr_autoload.host.nr) + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); + if (vmx->msr_autoload.guest.nr) + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); + + if (vmx->fail) { + WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != + VMXERR_ENTRY_INVALID_CONTROL_FIELD); + vmx->fail = 0; + return 1; + } + + /* + * VMExit clears RFLAGS.IF and DR7, even on a consistency check. + */ + local_irq_enable(); + if (hw_breakpoint_active()) + set_debugreg(__this_cpu_read(cpu_dr7), 7); + + /* + * A non-failing VMEntry means we somehow entered guest mode with + * an illegal RIP, and that's just the tip of the iceberg. There + * is no telling what memory has been modified or what state has + * been exposed to unknown code. Hitting this all but guarantees + * a (very critical) hardware issue. + */ + WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & + VMX_EXIT_REASONS_FAILED_VMENTRY)); + + return 0; +} +STACK_FRAME_NON_STANDARD(nested_vmx_check_vmentry_hw); + static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12); /* * If from_vmentry is false, this is being called from state restore (either RSM * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. ++ * ++ * Returns: ++ * 0 - success, i.e. proceed with actual VMEnter ++ * 1 - consistency check VMExit ++ * -1 - consistency check VMFail */ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) @@ -12668,6 +12794,11 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, if (from_vmentry) { nested_get_vmcs12_pages(vcpu); + if (nested_vmx_check_vmentry_hw(vcpu)) { + vmx_switch_vmcs(vcpu, &vmx->vmcs01); + return -1; + } + if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) goto vmentry_fail_vmexit; } @@ -12804,13 +12935,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) * We're finally done with prerequisite checking, and can start with * the nested entry. 
*/ - vmx->nested.nested_run_pending = 1; ret = nested_vmx_enter_non_root_mode(vcpu, true); - if (ret) { - vmx->nested.nested_run_pending = 0; + vmx->nested.nested_run_pending = !ret; + if (ret > 0) return 1; - } + else if (ret) + return nested_vmx_failValid(vcpu, + VMXERR_ENTRY_INVALID_CONTROL_FIELD); /* Hide L1D cache contents from the nested guest. */ vmx->vcpu.arch.l1tf_flush_l1d = true; From 2768c0cc4ad591d846568a989bcdf2c49660a97e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Sep 2018 09:23:58 -0700 Subject: [PATCH 150/204] KVM: nVMX: WARN if nested run hits VMFail with early consistency checks enabled When early consistency checks are enabled, all VMFail conditions should be caught by nested_vmx_check_vmentry_hw(). Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index fb4a8694ee52..c255686ebd1a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -13581,14 +13581,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, /* trying to cancel vmlaunch/vmresume is a bug */ WARN_ON_ONCE(vmx->nested.nested_run_pending); - /* - * The only expected VM-instruction error is "VM entry with - * invalid control field(s)." Anything else indicates a - * problem with L0. - */ - WARN_ON_ONCE(vmx->fail && (vmcs_read32(VM_INSTRUCTION_ERROR) != - VMXERR_ENTRY_INVALID_CONTROL_FIELD)); - leave_guest_mode(vcpu); if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) @@ -13615,6 +13607,16 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr, vmcs12->vm_exit_msr_store_count)) nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); + } else { + /* + * The only expected VM-instruction error is "VM entry with + * invalid control field(s)." Anything else indicates a + * problem with L0. And we should never get here with a + * VMFail of any type if early consistency checks are enabled. + */ + WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != + VMXERR_ENTRY_INVALID_CONTROL_FIELD); + WARN_ON_ONCE(nested_early_check); } vmx_switch_vmcs(vcpu, &vmx->vmcs01); From e6b6c483ebe9b3f82710fc39eff5531b4d80b089 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 8 Oct 2018 19:19:04 +0200 Subject: [PATCH 151/204] KVM: x86: hyperv: fix 'tlb_lush' typo Regardless of whether your TLB is lush or not it still needs flushing. 
Reported-by: Roman Kagan Reviewed-by: Roman Kagan Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/hyperv.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index d81f536d024a..901572b4f6f7 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -499,7 +499,7 @@ struct kvm_vcpu_hv { struct kvm_hyperv_exit exit; struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT]; DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); - cpumask_t tlb_lush; + cpumask_t tlb_flush; }; struct kvm_vcpu_arch { diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index bad4bffdc8b9..f1ae1777519c 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1354,12 +1354,12 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't * analyze it here, flush TLB regardless of the specified address space. */ - cpumask_clear(&hv_vcpu->tlb_lush); + cpumask_clear(&hv_vcpu->tlb_flush); if (all_cpus) { kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP, - NULL, &hv_vcpu->tlb_lush); + NULL, &hv_vcpu->tlb_flush); goto ret_success; } @@ -1397,7 +1397,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, flush_request: kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP, - vcpu_bitmap, &hv_vcpu->tlb_lush); + vcpu_bitmap, &hv_vcpu->tlb_flush); ret_success: /* We always do full TLB flush, set rep_done = rep_cnt. */ From f21dd494506ad002a5b6b32e50a5d4ccac6929fe Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 10 Oct 2018 17:14:38 +0200 Subject: [PATCH 152/204] KVM: x86: hyperv: optimize sparse VP set processing Rewrite kvm_hv_flush_tlb()/send_ipi_vcpus_mask() making them cleaner and somewhat more optimal. hv_vcpu_in_sparse_set() is converted to sparse_set_to_vcpu_mask() which copies sparse banks u64-at-a-time and then, depending on the num_mismatched_vp_indexes value, returns immediately or does vp index to vcpu index conversion by walking all vCPUs. To support the change and make kvm_hv_send_ipi() look similar to kvm_hv_flush_tlb() send_ipi_vcpus_mask() is introduced. 
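As a standalone illustration (plain C, not kernel code; sparse_set_to_vp_bitmap and the example masks are invented for this sketch), a sparse VP set from the hypercall is expanded one u64 bank at a time into a flat VP bitmap, which is the shape consumed by sparse_set_to_vcpu_mask() before the optional VP-index-to-vcpu-index walk:

#include <stdint.h>
#include <stdio.h>

#define MAX_BANKS 64	/* a VP set covers up to 64 banks of 64 VPs each */

/*
 * Expand a sparse VP set (valid_bank_mask + packed sparse_banks[]) into a
 * flat bitmap: bank i of the output comes from the next packed entry iff
 * bit i of valid_bank_mask is set, mirroring the u64-at-a-time copy.
 */
static void sparse_set_to_vp_bitmap(uint64_t valid_bank_mask,
				    const uint64_t *sparse_banks,
				    uint64_t *vp_bitmap)
{
	int bank, sbank = 0;

	for (bank = 0; bank < MAX_BANKS; bank++)
		vp_bitmap[bank] = (valid_bank_mask & (1ULL << bank)) ?
				  sparse_banks[sbank++] : 0;
}

int main(void)
{
	/* Banks 0 and 2 are present: VPs 0 and 1, plus VP 130 (2 * 64 + 2). */
	uint64_t sparse_banks[] = { 0x3, 0x4 };
	uint64_t vp_bitmap[MAX_BANKS];

	sparse_set_to_vp_bitmap(0x5, sparse_banks, vp_bitmap);
	printf("bank0=%#llx bank2=%#llx\n",
	       (unsigned long long)vp_bitmap[0],
	       (unsigned long long)vp_bitmap[2]);
	return 0;
}

When num_mismatched_vp_indexes is zero, the flat bitmap doubles as the vcpu mask, so the per-vCPU walk is skipped entirely.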
Suggested-by: Roman Kagan Signed-off-by: Vitaly Kuznetsov Reviewed-by: Roman Kagan Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 167 +++++++++++++++++------------------------- 1 file changed, 68 insertions(+), 99 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index f1ae1777519c..df9ef8f1d2c8 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -36,6 +36,8 @@ #include "trace.h" +#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64) + static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint) { return atomic64_read(&synic->sint[sint]); @@ -1277,37 +1279,47 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) return kvm_hv_get_msr(vcpu, msr, pdata, host); } -static __always_inline bool hv_vcpu_in_sparse_set(struct kvm_vcpu_hv *hv_vcpu, - u64 sparse_banks[], - u64 valid_bank_mask) +static __always_inline unsigned long *sparse_set_to_vcpu_mask( + struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask, + u64 *vp_bitmap, unsigned long *vcpu_bitmap) { - int bank = hv_vcpu->vp_index / 64, sbank; + struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_vcpu *vcpu; + int i, bank, sbank = 0; - if (bank >= 64) - return false; + memset(vp_bitmap, 0, + KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap)); + for_each_set_bit(bank, (unsigned long *)&valid_bank_mask, + KVM_HV_MAX_SPARSE_VCPU_SET_BITS) + vp_bitmap[bank] = sparse_banks[sbank++]; - if (!(valid_bank_mask & BIT_ULL(bank))) - return false; + if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) { + /* for all vcpus vp_index == vcpu_idx */ + return (unsigned long *)vp_bitmap; + } - /* Sparse bank number equals to the number of set bits before it */ - sbank = bitmap_weight((unsigned long *)&valid_bank_mask, bank); - - return !!(sparse_banks[sbank] & BIT_ULL(hv_vcpu->vp_index % 64)); + bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); + kvm_for_each_vcpu(i, vcpu, kvm) { + if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index, + (unsigned long *)vp_bitmap)) + __set_bit(i, vcpu_bitmap); + } + return vcpu_bitmap; } static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, u16 rep_cnt, bool ex) { struct kvm *kvm = current_vcpu->kvm; - struct kvm_hv *hv = &kvm->arch.hyperv; struct kvm_vcpu_hv *hv_vcpu = ¤t_vcpu->arch.hyperv; struct hv_tlb_flush_ex flush_ex; struct hv_tlb_flush flush; - struct kvm_vcpu *vcpu; - unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0}; + u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; + DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS); + unsigned long *vcpu_mask; u64 valid_bank_mask; u64 sparse_banks[64]; - int sparse_banks_len, i, bank, sbank; + int sparse_banks_len; bool all_cpus; if (!ex) { @@ -1350,54 +1362,19 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, return HV_STATUS_INVALID_HYPERCALL_INPUT; } + cpumask_clear(&hv_vcpu->tlb_flush); + + vcpu_mask = all_cpus ? NULL : + sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, + vp_bitmap, vcpu_bitmap); + /* * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't * analyze it here, flush TLB regardless of the specified address space. 
*/ - cpumask_clear(&hv_vcpu->tlb_flush); - - if (all_cpus) { - kvm_make_vcpus_request_mask(kvm, - KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP, - NULL, &hv_vcpu->tlb_flush); - goto ret_success; - } - - if (atomic_read(&hv->num_mismatched_vp_indexes)) { - kvm_for_each_vcpu(i, vcpu, kvm) { - if (hv_vcpu_in_sparse_set(&vcpu->arch.hyperv, - sparse_banks, - valid_bank_mask)) - __set_bit(i, vcpu_bitmap); - } - goto flush_request; - } - - /* - * num_mismatched_vp_indexes is zero so every vcpu has - * vp_index == vcpu_idx. - */ - sbank = 0; - for_each_set_bit(bank, (unsigned long *)&valid_bank_mask, - BITS_PER_LONG) { - for_each_set_bit(i, - (unsigned long *)&sparse_banks[sbank], - BITS_PER_LONG) { - u32 vp_index = bank * 64 + i; - - /* A non-existent vCPU was specified */ - if (vp_index >= KVM_MAX_VCPUS) - return HV_STATUS_INVALID_HYPERCALL_INPUT; - - __set_bit(vp_index, vcpu_bitmap); - } - sbank++; - } - -flush_request: kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP, - vcpu_bitmap, &hv_vcpu->tlb_flush); + vcpu_mask, &hv_vcpu->tlb_flush); ret_success: /* We always do full TLB flush, set rep_done = rep_cnt. */ @@ -1405,18 +1382,38 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET); } +static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector, + unsigned long *vcpu_bitmap) +{ + struct kvm_lapic_irq irq = { + .delivery_mode = APIC_DM_FIXED, + .vector = vector + }; + struct kvm_vcpu *vcpu; + int i; + + kvm_for_each_vcpu(i, vcpu, kvm) { + if (vcpu_bitmap && !test_bit(i, vcpu_bitmap)) + continue; + + /* We fail only when APIC is disabled */ + kvm_apic_set_irq(vcpu, &irq, NULL); + } +} + static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa, bool ex, bool fast) { struct kvm *kvm = current_vcpu->kvm; - struct kvm_hv *hv = &kvm->arch.hyperv; struct hv_send_ipi_ex send_ipi_ex; struct hv_send_ipi send_ipi; - struct kvm_vcpu *vcpu; + u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; + DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS); + unsigned long *vcpu_mask; unsigned long valid_bank_mask; u64 sparse_banks[64]; - int sparse_banks_len, bank, i, sbank; - struct kvm_lapic_irq irq = {.delivery_mode = APIC_DM_FIXED}; + int sparse_banks_len; + u32 vector; bool all_cpus; if (!ex) { @@ -1425,18 +1422,18 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa, sizeof(send_ipi)))) return HV_STATUS_INVALID_HYPERCALL_INPUT; sparse_banks[0] = send_ipi.cpu_mask; - irq.vector = send_ipi.vector; + vector = send_ipi.vector; } else { /* 'reserved' part of hv_send_ipi should be 0 */ if (unlikely(ingpa >> 32 != 0)) return HV_STATUS_INVALID_HYPERCALL_INPUT; sparse_banks[0] = outgpa; - irq.vector = (u32)ingpa; + vector = (u32)ingpa; } all_cpus = false; valid_bank_mask = BIT_ULL(0); - trace_kvm_hv_send_ipi(irq.vector, sparse_banks[0]); + trace_kvm_hv_send_ipi(vector, sparse_banks[0]); } else { if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex, sizeof(send_ipi_ex)))) @@ -1446,7 +1443,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa, send_ipi_ex.vp_set.format, send_ipi_ex.vp_set.valid_bank_mask); - irq.vector = send_ipi_ex.vector; + vector = send_ipi_ex.vector; valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask; sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) * sizeof(sparse_banks[0]); @@ -1465,42 +1462,14 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa, return HV_STATUS_INVALID_HYPERCALL_INPUT; } - if 
((irq.vector < HV_IPI_LOW_VECTOR) || - (irq.vector > HV_IPI_HIGH_VECTOR)) + if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR)) return HV_STATUS_INVALID_HYPERCALL_INPUT; - if (all_cpus || atomic_read(&hv->num_mismatched_vp_indexes)) { - kvm_for_each_vcpu(i, vcpu, kvm) { - if (all_cpus || hv_vcpu_in_sparse_set( - &vcpu->arch.hyperv, sparse_banks, - valid_bank_mask)) { - /* We fail only when APIC is disabled */ - kvm_apic_set_irq(vcpu, &irq, NULL); - } - } - goto ret_success; - } + vcpu_mask = all_cpus ? NULL : + sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, + vp_bitmap, vcpu_bitmap); - /* - * num_mismatched_vp_indexes is zero so every vcpu has - * vp_index == vcpu_idx. - */ - sbank = 0; - for_each_set_bit(bank, (unsigned long *)&valid_bank_mask, 64) { - for_each_set_bit(i, (unsigned long *)&sparse_banks[sbank], 64) { - u32 vp_index = bank * 64 + i; - struct kvm_vcpu *vcpu = - get_vcpu_by_vpidx(kvm, vp_index); - - /* Unknown vCPU specified */ - if (!vcpu) - continue; - - /* We fail only when APIC is disabled */ - kvm_apic_set_irq(vcpu, &irq, NULL); - } - sbank++; - } + kvm_send_ipi_to_many(kvm, vector, vcpu_mask); ret_success: return HV_STATUS_SUCCESS; From 0e0a53c551317654e2d7885fdfd23299fee99b6b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 22 Aug 2018 19:59:33 +0200 Subject: [PATCH 153/204] kvm: x86: optimize dr6 restore The quote from the comment almost says it all: we are currently zeroing the guest dr6 in kvm_arch_vcpu_put, because do_debug expects it. However, the host %dr6 is either: - zero because the guest hasn't run after kvm_arch_vcpu_load - written from vcpu->arch.dr6 by vcpu_enter_guest - written by the guest and copied to vcpu->arch.dr6 by ->sync_dirty_debug_regs(). Therefore, we can skip the write if vcpu->arch.dr6 is already zero. We may do extra useless writes if vcpu->arch.dr6 is nonzero but the guest hasn't run; however that is less important for performance. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 20a667da0a31..92159db9cf71 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3186,11 +3186,16 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) kvm_x86_ops->vcpu_put(vcpu); vcpu->arch.last_host_tsc = rdtsc(); /* - * If userspace has set any breakpoints or watchpoints, dr6 is restored - * on every vmexit, but if not, we might have a stale dr6 from the - * guest. do_debug expects dr6 to be cleared after it runs, do the same. + * Here dr6 is either zero or, if the guest has run and userspace + * has not set any breakpoints or watchpoints, it can be set to + * the guest dr6 (stored in vcpu->arch.dr6). do_debug expects dr6 + * to be cleared after it runs, so clear the host register. However, + * MOV to DR can be expensive when running nested, omit it if + * vcpu->arch.dr6 is already zero: in that case, the host dr6 cannot + * currently be nonzero. */ - set_debugreg(0, 6); + if (vcpu->arch.dr6) + set_debugreg(0, 6); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, From 44dd3ffa7bb31126e0fc4f6f30398546eb494388 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 8 Oct 2018 21:28:05 +0200 Subject: [PATCH 154/204] x86/kvm/mmu: make vcpu->mmu a pointer to the current MMU As a preparation to full MMU split between L1 and L2 make vcpu->arch.mmu a pointer to the currently used mmu. For now, this is always vcpu->arch.root_mmu. No functional change. 
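A toy sketch of what the pointer conversion buys (invented toy_* types, not the real kvm structures; the guest_mmu field is hypothetical here and only stands in for the L2 MMU that later work in this direction adds): once all fault handling goes through vcpu->mmu, switching MMUs becomes a pointer assignment instead of copying or rebuilding state.

#include <stdio.h>

struct toy_mmu {
	const char *name;
};

struct toy_vcpu {
	struct toy_mmu *mmu;		/* current MMU, used for all faults */
	struct toy_mmu root_mmu;	/* L1 MMU: the only one after this patch */
	struct toy_mmu guest_mmu;	/* hypothetical L2 MMU from later work */
};

int main(void)
{
	struct toy_vcpu vcpu = {
		.root_mmu  = { "root (L1)"  },
		.guest_mmu = { "guest (L2)" },
	};

	/* What this patch does unconditionally: point at the L1 MMU. */
	vcpu.mmu = &vcpu.root_mmu;
	printf("current MMU: %s\n", vcpu.mmu->name);

	/* What a full L1/L2 split would enable later: just repoint. */
	vcpu.mmu = &vcpu.guest_mmu;
	printf("current MMU: %s\n", vcpu.mmu->name);
	return 0;
}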
Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini Reviewed-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 5 +- arch/x86/kvm/mmu.c | 165 ++++++++++++++++---------------- arch/x86/kvm/mmu.h | 8 +- arch/x86/kvm/mmu_audit.c | 12 +-- arch/x86/kvm/paging_tmpl.h | 15 +-- arch/x86/kvm/svm.c | 14 +-- arch/x86/kvm/vmx.c | 15 +-- arch/x86/kvm/x86.c | 20 ++-- 8 files changed, 130 insertions(+), 124 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 901572b4f6f7..5e8f1a08b9d2 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -543,7 +543,10 @@ struct kvm_vcpu_arch { * the paging mode of the l1 guest. This context is always used to * handle faults. */ - struct kvm_mmu mmu; + struct kvm_mmu *mmu; + + /* Non-nested MMU for L1 */ + struct kvm_mmu root_mmu; /* * Paging state of an L2 guest (used for nested npt) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index b383e8432649..dac9e977c703 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2181,7 +2181,7 @@ static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct list_head *invalid_list) { if (sp->role.cr4_pae != !!is_pae(vcpu) - || vcpu->arch.mmu.sync_page(vcpu, sp) == 0) { + || vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); return false; } @@ -2375,14 +2375,14 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, int collisions = 0; LIST_HEAD(invalid_list); - role = vcpu->arch.mmu.base_role; + role = vcpu->arch.mmu->base_role; role.level = level; role.direct = direct; if (role.direct) role.cr4_pae = 0; role.access = access; - if (!vcpu->arch.mmu.direct_map - && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { + if (!vcpu->arch.mmu->direct_map + && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; role.quadrant = quadrant; @@ -2457,11 +2457,11 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato { iterator->addr = addr; iterator->shadow_addr = root; - iterator->level = vcpu->arch.mmu.shadow_root_level; + iterator->level = vcpu->arch.mmu->shadow_root_level; if (iterator->level == PT64_ROOT_4LEVEL && - vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL && - !vcpu->arch.mmu.direct_map) + vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL && + !vcpu->arch.mmu->direct_map) --iterator->level; if (iterator->level == PT32E_ROOT_LEVEL) { @@ -2469,10 +2469,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato * prev_root is currently only used for 64-bit hosts. So only * the active root_hpa is valid here. 
*/ - BUG_ON(root != vcpu->arch.mmu.root_hpa); + BUG_ON(root != vcpu->arch.mmu->root_hpa); iterator->shadow_addr - = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; + = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; iterator->shadow_addr &= PT64_BASE_ADDR_MASK; --iterator->level; if (!iterator->shadow_addr) @@ -2483,7 +2483,7 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, struct kvm_vcpu *vcpu, u64 addr) { - shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu.root_hpa, + shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa, addr); } @@ -3095,7 +3095,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable, int emulate = 0; gfn_t pseudo_gfn; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) return 0; for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { @@ -3310,7 +3310,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, u64 spte = 0ull; uint retry_count = 0; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) return false; if (!page_fault_can_be_fast(error_code)) @@ -3484,7 +3484,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free) { int i; LIST_HEAD(invalid_list); - struct kvm_mmu *mmu = &vcpu->arch.mmu; + struct kvm_mmu *mmu = vcpu->arch.mmu; bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT; BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG); @@ -3544,20 +3544,20 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) struct kvm_mmu_page *sp; unsigned i; - if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL) { + if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) { spin_lock(&vcpu->kvm->mmu_lock); if(make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, 0, 0, - vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL); + vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL); ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); - vcpu->arch.mmu.root_hpa = __pa(sp->spt); - } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { + vcpu->arch.mmu->root_hpa = __pa(sp->spt); + } else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) { for (i = 0; i < 4; ++i) { - hpa_t root = vcpu->arch.mmu.pae_root[i]; + hpa_t root = vcpu->arch.mmu->pae_root[i]; MMU_WARN_ON(VALID_PAGE(root)); spin_lock(&vcpu->kvm->mmu_lock); @@ -3570,9 +3570,9 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) root = __pa(sp->spt); ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); - vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; + vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK; } - vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); + vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); } else BUG(); @@ -3586,7 +3586,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) gfn_t root_gfn; int i; - root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; + root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT; if (mmu_check_root(vcpu, root_gfn)) return 1; @@ -3595,8 +3595,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) * Do we shadow a long mode page table? If so we need to * write-protect the guests page table root. 
*/ - if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) { - hpa_t root = vcpu->arch.mmu.root_hpa; + if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { + hpa_t root = vcpu->arch.mmu->root_hpa; MMU_WARN_ON(VALID_PAGE(root)); @@ -3606,11 +3606,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, root_gfn, 0, - vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL); + vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL); root = __pa(sp->spt); ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); - vcpu->arch.mmu.root_hpa = root; + vcpu->arch.mmu->root_hpa = root; return 0; } @@ -3620,17 +3620,17 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) * the shadow page table may be a PAE or a long mode page table. */ pm_mask = PT_PRESENT_MASK; - if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) + if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; for (i = 0; i < 4; ++i) { - hpa_t root = vcpu->arch.mmu.pae_root[i]; + hpa_t root = vcpu->arch.mmu->pae_root[i]; MMU_WARN_ON(VALID_PAGE(root)); - if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { - pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i); + if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) { + pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i); if (!(pdptr & PT_PRESENT_MASK)) { - vcpu->arch.mmu.pae_root[i] = 0; + vcpu->arch.mmu->pae_root[i] = 0; continue; } root_gfn = pdptr >> PAGE_SHIFT; @@ -3648,16 +3648,16 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); - vcpu->arch.mmu.pae_root[i] = root | pm_mask; + vcpu->arch.mmu->pae_root[i] = root | pm_mask; } - vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); + vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); /* * If we shadow a 32 bit page table with a long mode page * table we enter this path. */ - if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) { - if (vcpu->arch.mmu.lm_root == NULL) { + if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) { + if (vcpu->arch.mmu->lm_root == NULL) { /* * The additional page necessary for this is only * allocated on demand. 
@@ -3669,12 +3669,12 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) if (lm_root == NULL) return 1; - lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; + lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask; - vcpu->arch.mmu.lm_root = lm_root; + vcpu->arch.mmu->lm_root = lm_root; } - vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); + vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); } return 0; @@ -3682,7 +3682,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) static int mmu_alloc_roots(struct kvm_vcpu *vcpu) { - if (vcpu->arch.mmu.direct_map) + if (vcpu->arch.mmu->direct_map) return mmu_alloc_direct_roots(vcpu); else return mmu_alloc_shadow_roots(vcpu); @@ -3693,17 +3693,16 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) int i; struct kvm_mmu_page *sp; - if (vcpu->arch.mmu.direct_map) + if (vcpu->arch.mmu->direct_map) return; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) return; vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); - if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) { - hpa_t root = vcpu->arch.mmu.root_hpa; - + if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { + hpa_t root = vcpu->arch.mmu->root_hpa; sp = page_header(root); /* @@ -3734,7 +3733,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); for (i = 0; i < 4; ++i) { - hpa_t root = vcpu->arch.mmu.pae_root[i]; + hpa_t root = vcpu->arch.mmu->pae_root[i]; if (root && VALID_PAGE(root)) { root &= PT64_BASE_ADDR_MASK; @@ -3808,7 +3807,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) int root, leaf; bool reserved = false; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) goto exit; walk_shadow_page_lockless_begin(vcpu); @@ -3825,7 +3824,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) if (!is_shadow_present_pte(spte)) break; - reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, + reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte, iterator.level); } @@ -3904,7 +3903,7 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) struct kvm_shadow_walk_iterator iterator; u64 spte; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) return; walk_shadow_page_lockless_begin(vcpu); @@ -3931,7 +3930,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, if (r) return r; - MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); + MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)); return nonpaging_map(vcpu, gva & PAGE_MASK, @@ -3944,8 +3943,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; arch.gfn = gfn; - arch.direct_map = vcpu->arch.mmu.direct_map; - arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); + arch.direct_map = vcpu->arch.mmu->direct_map; + arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu); return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); } @@ -4051,7 +4050,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, int write = error_code & PFERR_WRITE_MASK; bool map_writable; - MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); + MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)); if (page_fault_handle_page_track(vcpu, error_code, gfn)) return RET_PF_EMULATE; @@ -4127,7 +4126,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, { uint i; struct kvm_mmu_root_info root; - struct 
kvm_mmu *mmu = &vcpu->arch.mmu; + struct kvm_mmu *mmu = vcpu->arch.mmu; root.cr3 = mmu->get_cr3(vcpu); root.hpa = mmu->root_hpa; @@ -4150,7 +4149,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3, union kvm_mmu_page_role new_role, bool skip_tlb_flush) { - struct kvm_mmu *mmu = &vcpu->arch.mmu; + struct kvm_mmu *mmu = vcpu->arch.mmu; /* * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid @@ -4219,7 +4218,7 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu) static void inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { - vcpu->arch.mmu.inject_page_fault(vcpu, fault); + vcpu->arch.mmu->inject_page_fault(vcpu, fault); } static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, @@ -4740,7 +4739,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu) static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) { - struct kvm_mmu *context = &vcpu->arch.mmu; + struct kvm_mmu *context = vcpu->arch.mmu; context->base_role.word = mmu_base_role_mask.word & kvm_calc_tdp_mmu_root_page_role(vcpu).word; @@ -4812,7 +4811,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu) void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) { - struct kvm_mmu *context = &vcpu->arch.mmu; + struct kvm_mmu *context = vcpu->arch.mmu; if (!is_paging(vcpu)) nonpaging_init_context(vcpu, context); @@ -4832,7 +4831,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); static union kvm_mmu_page_role kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty) { - union kvm_mmu_page_role role = vcpu->arch.mmu.base_role; + union kvm_mmu_page_role role = vcpu->arch.mmu->base_role; role.level = PT64_ROOT_4LEVEL; role.direct = false; @@ -4846,7 +4845,7 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty) void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, bool accessed_dirty, gpa_t new_eptp) { - struct kvm_mmu *context = &vcpu->arch.mmu; + struct kvm_mmu *context = vcpu->arch.mmu; union kvm_mmu_page_role root_page_role = kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty); @@ -4873,7 +4872,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); static void init_kvm_softmmu(struct kvm_vcpu *vcpu) { - struct kvm_mmu *context = &vcpu->arch.mmu; + struct kvm_mmu *context = vcpu->arch.mmu; kvm_init_shadow_mmu(vcpu); context->set_cr3 = kvm_x86_ops->set_cr3; @@ -4891,7 +4890,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) g_context->inject_page_fault = kvm_inject_page_fault; /* - * Note that arch.mmu.gva_to_gpa translates l2_gpa to l1_gpa using + * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using * L1's nested page tables (e.g. EPT12). 
The nested translation * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using * L2's page tables as the first level of translation and L1's @@ -4930,10 +4929,10 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots) if (reset_roots) { uint i; - vcpu->arch.mmu.root_hpa = INVALID_PAGE; + vcpu->arch.mmu->root_hpa = INVALID_PAGE; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; + vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; } if (mmu_is_nested(vcpu)) @@ -4982,7 +4981,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load); void kvm_mmu_unload(struct kvm_vcpu *vcpu) { kvm_mmu_free_roots(vcpu, KVM_MMU_ROOTS_ALL); - WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); + WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa)); } EXPORT_SYMBOL_GPL(kvm_mmu_unload); @@ -4996,7 +4995,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, } ++vcpu->kvm->stat.mmu_pte_updated; - vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); + vcpu->arch.mmu->update_pte(vcpu, sp, spte, new); } static bool need_remote_flush(u64 old, u64 new) @@ -5176,7 +5175,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, entry = *spte; mmu_page_zap_pte(vcpu->kvm, sp, spte); if (gentry && - !((sp->role.word ^ vcpu->arch.mmu.base_role.word) + !((sp->role.word ^ vcpu->arch.mmu->base_role.word) & mmu_base_role_mask.word) && rmap_can_add(vcpu)) mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); if (need_remote_flush(entry, *spte)) @@ -5194,7 +5193,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) gpa_t gpa; int r; - if (vcpu->arch.mmu.direct_map) + if (vcpu->arch.mmu->direct_map) return 0; gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); @@ -5230,10 +5229,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, { int r, emulation_type = 0; enum emulation_result er; - bool direct = vcpu->arch.mmu.direct_map; + bool direct = vcpu->arch.mmu->direct_map; /* With shadow page tables, fault_address contains a GVA or nGPA. */ - if (vcpu->arch.mmu.direct_map) { + if (vcpu->arch.mmu->direct_map) { vcpu->arch.gpa_available = true; vcpu->arch.gpa_val = cr2; } @@ -5246,8 +5245,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, } if (r == RET_PF_INVALID) { - r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code), - false); + r = vcpu->arch.mmu->page_fault(vcpu, cr2, + lower_32_bits(error_code), + false); WARN_ON(r == RET_PF_INVALID); } @@ -5263,7 +5263,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, * paging in both guests. If true, we simply unprotect the page * and resume the guest. */ - if (vcpu->arch.mmu.direct_map && + if (vcpu->arch.mmu->direct_map && (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) { kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2)); return 1; @@ -5311,7 +5311,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) { - struct kvm_mmu *mmu = &vcpu->arch.mmu; + struct kvm_mmu *mmu = vcpu->arch.mmu; int i; /* INVLPG on a * non-canonical address is a NOP according to the SDM. 
*/ @@ -5342,7 +5342,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) { - struct kvm_mmu *mmu = &vcpu->arch.mmu; + struct kvm_mmu *mmu = vcpu->arch.mmu; bool tlb_flush = false; uint i; @@ -5386,8 +5386,8 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp); static void free_mmu_pages(struct kvm_vcpu *vcpu) { - free_page((unsigned long)vcpu->arch.mmu.pae_root); - free_page((unsigned long)vcpu->arch.mmu.lm_root); + free_page((unsigned long)vcpu->arch.mmu->pae_root); + free_page((unsigned long)vcpu->arch.mmu->lm_root); } static int alloc_mmu_pages(struct kvm_vcpu *vcpu) @@ -5407,9 +5407,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu) if (!page) return -ENOMEM; - vcpu->arch.mmu.pae_root = page_address(page); + vcpu->arch.mmu->pae_root = page_address(page); for (i = 0; i < 4; ++i) - vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; + vcpu->arch.mmu->pae_root[i] = INVALID_PAGE; return 0; } @@ -5418,20 +5418,21 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) { uint i; - vcpu->arch.walk_mmu = &vcpu->arch.mmu; - vcpu->arch.mmu.root_hpa = INVALID_PAGE; - vcpu->arch.mmu.translate_gpa = translate_gpa; + vcpu->arch.mmu = &vcpu->arch.root_mmu; + vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; + vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.root_mmu.translate_gpa = translate_gpa; vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; + vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; return alloc_mmu_pages(vcpu); } void kvm_mmu_setup(struct kvm_vcpu *vcpu) { - MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); + MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa)); /* * kvm_mmu_setup() is called only on vCPU initialization. 
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 99d549905311..c7b333147c4a 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -75,7 +75,7 @@ static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) { - if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE)) + if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE)) return 0; return kvm_mmu_load(vcpu); @@ -97,9 +97,9 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu) static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu) { - if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) - vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa | - kvm_get_active_pcid(vcpu)); + if (VALID_PAGE(vcpu->arch.mmu->root_hpa)) + vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa | + kvm_get_active_pcid(vcpu)); } /* diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c index 1272861e77b9..abac7e208853 100644 --- a/arch/x86/kvm/mmu_audit.c +++ b/arch/x86/kvm/mmu_audit.c @@ -59,19 +59,19 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn) int i; struct kvm_mmu_page *sp; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) return; - if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) { - hpa_t root = vcpu->arch.mmu.root_hpa; + if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { + hpa_t root = vcpu->arch.mmu->root_hpa; sp = page_header(root); - __mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu.root_level); + __mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level); return; } for (i = 0; i < 4; ++i) { - hpa_t root = vcpu->arch.mmu.pae_root[i]; + hpa_t root = vcpu->arch.mmu->pae_root[i]; if (root && VALID_PAGE(root)) { root &= PT64_BASE_ADDR_MASK; @@ -122,7 +122,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level) hpa = pfn << PAGE_SHIFT; if ((*sptep & PT64_BASE_ADDR_MASK) != hpa) audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx " - "ent %llxn", vcpu->arch.mmu.root_level, pfn, + "ent %llxn", vcpu->arch.mmu->root_level, pfn, hpa, *sptep); } diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 14ffd973df54..7cf2185b7eb5 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -158,14 +158,15 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, u64 gpte) { - if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) + if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) goto no_present; if (!FNAME(is_present_gpte)(gpte)) goto no_present; /* if accessed bit is not supported prefetch non accessed gpte */ - if (PT_HAVE_ACCESSED_DIRTY(&vcpu->arch.mmu) && !(gpte & PT_GUEST_ACCESSED_MASK)) + if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) && + !(gpte & PT_GUEST_ACCESSED_MASK)) goto no_present; return false; @@ -480,7 +481,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, static int FNAME(walk_addr)(struct guest_walker *walker, struct kvm_vcpu *vcpu, gva_t addr, u32 access) { - return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr, + return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr, access); } @@ -509,7 +510,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, gfn = gpte_to_gfn(gpte); pte_access = sp->role.access & FNAME(gpte_access)(gpte); - FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte); + FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, 
no_dirty_log && (pte_access & ACC_WRITE_MASK)); if (is_error_pfn(pfn)) @@ -604,7 +605,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, direct_access = gw->pte_access; - top_level = vcpu->arch.mmu.root_level; + top_level = vcpu->arch.mmu->root_level; if (top_level == PT32E_ROOT_LEVEL) top_level = PT32_ROOT_LEVEL; /* @@ -616,7 +617,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, if (FNAME(gpte_changed)(vcpu, gw, top_level)) goto out_gpte_changed; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) goto out_gpte_changed; for (shadow_walk_init(&it, vcpu, addr); @@ -1004,7 +1005,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) gfn = gpte_to_gfn(gpte); pte_access = sp->role.access; pte_access &= FNAME(gpte_access)(gpte); - FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte); + FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access, &nr_present)) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d96092b35936..2936c63bcc2f 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2918,18 +2918,18 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) { WARN_ON(mmu_is_nested(vcpu)); kvm_init_shadow_mmu(vcpu); - vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3; - vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3; - vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr; - vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit; - vcpu->arch.mmu.shadow_root_level = get_npt_level(vcpu); - reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu); + vcpu->arch.mmu->set_cr3 = nested_svm_set_tdp_cr3; + vcpu->arch.mmu->get_cr3 = nested_svm_get_tdp_cr3; + vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr; + vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit; + vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu); + reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu); vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; } static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu) { - vcpu->arch.walk_mmu = &vcpu->arch.mmu; + vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; } static int nested_svm_check_permissions(struct vcpu_svm *svm) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c255686ebd1a..d243eba62340 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5111,9 +5111,10 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid, bool invalidate_gpa) { if (enable_ept && (invalidate_gpa || !enable_vpid)) { - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) return; - ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa)); + ept_sync_context(construct_eptp(vcpu, + vcpu->arch.mmu->root_hpa)); } else { vpid_sync_context(vpid); } @@ -9122,7 +9123,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu) } for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - if (kvm_get_pcid(vcpu, vcpu->arch.mmu.prev_roots[i].cr3) + if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3) == operand.pcid) roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); @@ -11304,16 +11305,16 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) VMX_EPT_EXECUTE_ONLY_BIT, nested_ept_ad_enabled(vcpu), nested_ept_get_cr3(vcpu)); - vcpu->arch.mmu.set_cr3 = vmx_set_cr3; - vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3; - vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; + vcpu->arch.mmu->set_cr3 = vmx_set_cr3; + vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3; + 
vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault; vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; } static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) { - vcpu->arch.walk_mmu = &vcpu->arch.mmu; + vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; } static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 92159db9cf71..a87e879373fe 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -503,7 +503,7 @@ static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fau if (mmu_is_nested(vcpu) && !fault->nested_page_fault) vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); else - vcpu->arch.mmu.inject_page_fault(vcpu, fault); + vcpu->arch.mmu->inject_page_fault(vcpu, fault); return fault->nested_page_fault; } @@ -602,7 +602,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if ((pdpte[i] & PT_PRESENT_MASK) && (pdpte[i] & - vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) { + vcpu->arch.mmu->guest_rsvd_check.rsvd_bits_mask[0][2])) { ret = 0; goto out; } @@ -4809,7 +4809,7 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, /* NPT walks are always user-walks */ access |= PFERR_USER_MASK; - t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception); + t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception); return t_gpa; } @@ -5895,7 +5895,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, if (WARN_ON_ONCE(is_guest_mode(vcpu))) return false; - if (!vcpu->arch.mmu.direct_map) { + if (!vcpu->arch.mmu->direct_map) { /* * Write permission should be allowed since only * write access need to be emulated. @@ -5928,7 +5928,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, kvm_release_pfn_clean(pfn); /* The instructions are well-emulated on direct mmu. */ - if (vcpu->arch.mmu.direct_map) { + if (vcpu->arch.mmu->direct_map) { unsigned int indirect_shadow_pages; spin_lock(&vcpu->kvm->mmu_lock); @@ -5995,7 +5995,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt, vcpu->arch.last_retry_eip = ctxt->eip; vcpu->arch.last_retry_addr = cr2; - if (!vcpu->arch.mmu.direct_map) + if (!vcpu->arch.mmu->direct_map) gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); @@ -9333,7 +9333,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { int r; - if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || + if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) || work->wakeup_all) return; @@ -9341,11 +9341,11 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) if (unlikely(r)) return; - if (!vcpu->arch.mmu.direct_map && - work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) + if (!vcpu->arch.mmu->direct_map && + work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu)) return; - vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); + vcpu->arch.mmu->page_fault(vcpu, work->gva, 0, true); } static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) From 3dc773e745234d904b09da37075200ec46af4e7a Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 8 Oct 2018 21:28:06 +0200 Subject: [PATCH 155/204] x86/kvm/mmu.c: set get_pdptr hook in kvm_init_shadow_ept_mmu() kvm_init_shadow_ept_mmu() doesn't set get_pdptr() hook and is this not a problem just because MMU context is already initialized and this hook points to kvm_pdptr_read(). 
As we're intended to use a dedicated MMU for shadow EPT MMU set this hook explicitly. Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini Reviewed-by: Sean Christopherson --- arch/x86/kvm/mmu.c | 1 + arch/x86/kvm/vmx.c | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index dac9e977c703..5b14dcbd248a 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4862,6 +4862,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, context->root_level = PT64_ROOT_4LEVEL; context->direct_map = false; context->base_role.word = root_page_role.word & mmu_base_role_mask.word; + update_permission_bitmask(vcpu, context, true); update_pkru_bitmask(vcpu, context, true); update_last_nonleaf_level(vcpu, context); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d243eba62340..fa7e46210a0d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -11308,6 +11308,7 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) vcpu->arch.mmu->set_cr3 = vmx_set_cr3; vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3; vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault; + vcpu->arch.mmu->get_pdptr = kvm_pdptr_read; vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; } From 6a82cd1c7b1e38d3b940fcb35a81e902dd52fb35 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 8 Oct 2018 21:28:07 +0200 Subject: [PATCH 156/204] x86/kvm/mmu.c: add kvm_mmu parameter to kvm_mmu_free_roots() Add an option to specify which MMU root we want to free. This will be used when nested and non-nested MMUs for L1 are split. Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini Reviewed-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 3 ++- arch/x86/kvm/mmu.c | 9 +++++---- arch/x86/kvm/vmx.c | 2 +- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 5e8f1a08b9d2..586ef144e564 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1339,7 +1339,8 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); int kvm_mmu_load(struct kvm_vcpu *vcpu); void kvm_mmu_unload(struct kvm_vcpu *vcpu); void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); -void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free); +void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + ulong roots_to_free); gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, struct x86_exception *exception); gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 5b14dcbd248a..96c753746b3b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3480,11 +3480,11 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, } /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */ -void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free) +void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + ulong roots_to_free) { int i; LIST_HEAD(invalid_list); - struct kvm_mmu *mmu = vcpu->arch.mmu; bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT; BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG); @@ -4200,7 +4200,8 @@ static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush) { if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush)) - kvm_mmu_free_roots(vcpu, KVM_MMU_ROOT_CURRENT); + kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, + KVM_MMU_ROOT_CURRENT); } void 
kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush) @@ -4981,7 +4982,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load); void kvm_mmu_unload(struct kvm_vcpu *vcpu) { - kvm_mmu_free_roots(vcpu, KVM_MMU_ROOTS_ALL); + kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOTS_ALL); WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa)); } EXPORT_SYMBOL_GPL(kvm_mmu_unload); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index fa7e46210a0d..a0c3992a5e88 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -9127,7 +9127,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu) == operand.pcid) roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); - kvm_mmu_free_roots(vcpu, roots_to_free); + kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); /* * If neither the current cr3 nor any of the prev_roots use the * given PCID, then nothing needs to be done here because a From 14c07ad89f4d728a468caaea6a769c018c2b8dd6 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 8 Oct 2018 21:28:08 +0200 Subject: [PATCH 157/204] x86/kvm/mmu: introduce guest_mmu When EPT is used for nested guest we need to re-init MMU as shadow EPT MMU (nested_ept_init_mmu_context() does that). When we return back from L2 to L1 kvm_mmu_reset_context() in nested_vmx_load_cr3() resets MMU back to normal TDP mode. Add a special 'guest_mmu' so we can use separate root caches; the improved hit rate is not very important for single vCPU performance, but it avoids contention on the mmu_lock for many vCPUs. On the nested CPUID benchmark, with 16 vCPUs, an L2->L1->L2 vmexit goes from 42k to 26k cycles. Signed-off-by: Vitaly Kuznetsov Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 3 +++ arch/x86/kvm/mmu.c | 20 ++++++++++++++----- arch/x86/kvm/vmx.c | 35 +++++++++++++++++++++------------ 3 files changed, 40 insertions(+), 18 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 586ef144e564..7448d0b744c9 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -548,6 +548,9 @@ struct kvm_vcpu_arch { /* Non-nested MMU for L1 */ struct kvm_mmu root_mmu; + /* L1 MMU when running nested */ + struct kvm_mmu guest_mmu; + /* * Paging state of an L2 guest (used for nested npt) * diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 96c753746b3b..bf9e3145ff0b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4832,7 +4832,10 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); static union kvm_mmu_page_role kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty) { - union kvm_mmu_page_role role = vcpu->arch.mmu->base_role; + union kvm_mmu_page_role role; + + /* Role is inherited from root_mmu */ + role.word = vcpu->arch.root_mmu.base_role.word; role.level = PT64_ROOT_4LEVEL; role.direct = false; @@ -4982,8 +4985,10 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load); void kvm_mmu_unload(struct kvm_vcpu *vcpu) { - kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOTS_ALL); - WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa)); + kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); + WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa)); + kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); + WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa)); } EXPORT_SYMBOL_GPL(kvm_mmu_unload); @@ -5422,13 +5427,18 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) vcpu->arch.mmu = &vcpu->arch.root_mmu; vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; + vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; 
vcpu->arch.root_mmu.translate_gpa = translate_gpa; - vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; - for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; + vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.guest_mmu.translate_gpa = translate_gpa; + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) + vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; + + vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; return alloc_mmu_pages(vcpu); } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a0c3992a5e88..02888031d038 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -8423,8 +8423,10 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) vmcs_write64(VMCS_LINK_POINTER, -1ull); } -static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) +static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) { + struct vcpu_vmx *vmx = to_vmx(vcpu); + if (vmx->nested.current_vmptr == -1ull) return; @@ -8438,10 +8440,12 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) vmx->nested.posted_intr_nv = -1; /* Flush VMCS12 to guest memory */ - kvm_vcpu_write_guest_page(&vmx->vcpu, + kvm_vcpu_write_guest_page(vcpu, vmx->nested.current_vmptr >> PAGE_SHIFT, vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); + kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); + vmx->nested.current_vmptr = -1ull; } @@ -8449,8 +8453,10 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) * Free whatever needs to be freed from vmx->nested when L1 goes down, or * just stops using VMX. */ -static void free_nested(struct vcpu_vmx *vmx) +static void free_nested(struct kvm_vcpu *vcpu) { + struct vcpu_vmx *vmx = to_vmx(vcpu); + if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) return; @@ -8483,6 +8489,8 @@ static void free_nested(struct vcpu_vmx *vmx) vmx->nested.pi_desc = NULL; } + kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); + free_loaded_vmcs(&vmx->nested.vmcs02); } @@ -8491,7 +8499,7 @@ static int handle_vmoff(struct kvm_vcpu *vcpu) { if (!nested_vmx_check_permission(vcpu)) return 1; - free_nested(to_vmx(vcpu)); + free_nested(vcpu); return nested_vmx_succeed(vcpu); } @@ -8517,7 +8525,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) VMXERR_VMCLEAR_VMXON_POINTER); if (vmptr == vmx->nested.current_vmptr) - nested_release_vmcs12(vmx); + nested_release_vmcs12(vcpu); kvm_vcpu_write_guest(vcpu, vmptr + offsetof(struct vmcs12, launch_state), @@ -8872,7 +8880,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); } - nested_release_vmcs12(vmx); + nested_release_vmcs12(vcpu); + /* * Load VMCS12 from guest memory since it is not already * cached. 
@@ -10928,12 +10937,10 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) */ static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - - vcpu_load(vcpu); - vmx_switch_vmcs(vcpu, &vmx->vmcs01); - free_nested(vmx); - vcpu_put(vcpu); + vcpu_load(vcpu); + vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01); + free_nested(vcpu); + vcpu_put(vcpu); } static void vmx_free_vcpu(struct kvm_vcpu *vcpu) @@ -11300,6 +11307,7 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) { WARN_ON(mmu_is_nested(vcpu)); + vcpu->arch.mmu = &vcpu->arch.guest_mmu; kvm_init_shadow_ept_mmu(vcpu, to_vmx(vcpu)->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT, @@ -11315,6 +11323,7 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) { + vcpu->arch.mmu = &vcpu->arch.root_mmu; vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; } @@ -13731,7 +13740,7 @@ static void vmx_leave_nested(struct kvm_vcpu *vcpu) to_vmx(vcpu)->nested.nested_run_pending = 0; nested_vmx_vmexit(vcpu, -1, 0, 0); } - free_nested(to_vmx(vcpu)); + free_nested(vcpu); } static int vmx_check_intercept(struct kvm_vcpu *vcpu, From e173299101affc677db085b2894a43be4a17a94b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 8 Oct 2018 21:28:09 +0200 Subject: [PATCH 158/204] x86/kvm/mmu: get rid of redundant kvm_mmu_setup() Just inline the contents into the sole caller, kvm_init_mmu is now public. Suggested-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini Reviewed-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/mmu.c | 12 ------------ arch/x86/kvm/x86.c | 2 +- 3 files changed, 1 insertion(+), 14 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7448d0b744c9..992bc1058170 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1188,7 +1188,6 @@ void kvm_mmu_module_exit(void); void kvm_mmu_destroy(struct kvm_vcpu *vcpu); int kvm_mmu_create(struct kvm_vcpu *vcpu); -void kvm_mmu_setup(struct kvm_vcpu *vcpu); void kvm_mmu_init_vm(struct kvm *kvm); void kvm_mmu_uninit_vm(struct kvm *kvm); void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index bf9e3145ff0b..dd147fa5b3f4 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -5442,18 +5442,6 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) return alloc_mmu_pages(vcpu); } -void kvm_mmu_setup(struct kvm_vcpu *vcpu) -{ - MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa)); - - /* - * kvm_mmu_setup() is called only on vCPU initialization. - * Therefore, no need to reset mmu roots as they are not yet - * initialized. 
- */ - kvm_init_mmu(vcpu, false); -} - static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, struct kvm_page_track_notifier_node *node) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a87e879373fe..2750270b7a39 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8484,7 +8484,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) kvm_vcpu_mtrr_init(vcpu); vcpu_load(vcpu); kvm_vcpu_reset(vcpu, false); - kvm_mmu_setup(vcpu); + kvm_init_mmu(vcpu, false); vcpu_put(vcpu); return 0; } From 36d9594dfbf22a59adb986d85e0543886ab898f2 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 8 Oct 2018 21:28:10 +0200 Subject: [PATCH 159/204] x86/kvm/mmu: make space for source data caching in struct kvm_mmu In preparation to MMU reconfiguration avoidance we need a space to cache source data. As this partially intersects with kvm_mmu_page_role, create 64bit sized union kvm_mmu_role holding both base and extended data. No functional change. Signed-off-by: Vitaly Kuznetsov Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 16 ++++++++++++++-- arch/x86/kvm/mmu.c | 26 ++++++++++++++++++++------ arch/x86/kvm/vmx.c | 2 +- 3 files changed, 35 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 992bc1058170..51ce635494b0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -256,7 +256,7 @@ struct kvm_mmu_memory_cache { * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp. */ union kvm_mmu_page_role { - unsigned word; + u32 word; struct { unsigned level:4; unsigned cr4_pae:1; @@ -282,6 +282,18 @@ union kvm_mmu_page_role { }; }; +union kvm_mmu_extended_role { + u32 word; +}; + +union kvm_mmu_role { + u64 as_u64; + struct { + union kvm_mmu_page_role base; + union kvm_mmu_extended_role ext; + }; +}; + struct kvm_rmap_head { unsigned long val; }; @@ -369,7 +381,7 @@ struct kvm_mmu { void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *pte); hpa_t root_hpa; - union kvm_mmu_page_role base_role; + union kvm_mmu_role mmu_role; u8 root_level; u8 shadow_root_level; u8 ept_ad; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index dd147fa5b3f4..3301973527aa 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2375,7 +2375,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, int collisions = 0; LIST_HEAD(invalid_list); - role = vcpu->arch.mmu->base_role; + role = vcpu->arch.mmu->mmu_role.base; role.level = level; role.direct = direct; if (role.direct) @@ -4423,7 +4423,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { - bool uses_nx = context->nx || context->base_role.smep_andnot_wp; + bool uses_nx = context->nx || + context->mmu_role.base.smep_andnot_wp; struct rsvd_bits_validate *shadow_zero_check; int i; @@ -4742,7 +4743,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) { struct kvm_mmu *context = vcpu->arch.mmu; - context->base_role.word = mmu_base_role_mask.word & + context->mmu_role.base.word = mmu_base_role_mask.word & kvm_calc_tdp_mmu_root_page_role(vcpu).word; context->page_fault = tdp_page_fault; context->sync_page = nonpaging_sync_page; @@ -4823,7 +4824,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) else paging32_init_context(vcpu, context); - context->base_role.word = mmu_base_role_mask.word & + context->mmu_role.base.word = 
mmu_base_role_mask.word & kvm_calc_shadow_mmu_root_page_role(vcpu).word; reset_shadow_zero_bits_mask(vcpu, context); } @@ -4865,7 +4866,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, context->update_pte = ept_update_pte; context->root_level = PT64_ROOT_4LEVEL; context->direct_map = false; - context->base_role.word = root_page_role.word & mmu_base_role_mask.word; + context->mmu_role.base.word = + root_page_role.word & mmu_base_role_mask.word; update_permission_bitmask(vcpu, context, true); update_pkru_bitmask(vcpu, context, true); @@ -5179,10 +5181,12 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, local_flush = true; while (npte--) { + u32 base_role = vcpu->arch.mmu->mmu_role.base.word; + entry = *spte; mmu_page_zap_pte(vcpu->kvm, sp, spte); if (gentry && - !((sp->role.word ^ vcpu->arch.mmu->base_role.word) + !((sp->role.word ^ base_role) & mmu_base_role_mask.word) && rmap_can_add(vcpu)) mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); if (need_remote_flush(entry, *spte)) @@ -5879,6 +5883,16 @@ int kvm_mmu_module_init(void) { int ret = -ENOMEM; + /* + * MMU roles use union aliasing which is, generally speaking, an + * undefined behavior. However, we supposedly know how compilers behave + * and the current status quo is unlikely to change. Guardians below are + * supposed to let us know if the assumption becomes false. + */ + BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32)); + BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32)); + BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64)); + kvm_mmu_reset_all_pte_masks(); pte_list_desc_cache = kmem_cache_create("pte_list_desc", diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 02888031d038..6f44d3a63434 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -9263,7 +9263,7 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, kvm_mmu_unload(vcpu); mmu->ept_ad = accessed_dirty; - mmu->base_role.ad_disabled = !accessed_dirty; + mmu->mmu_role.base.ad_disabled = !accessed_dirty; vmcs12->ept_pointer = address; /* * TODO: Check what's the correct approach in case From a336282d7753a92ced7b8e52ff959929f6e550ff Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 8 Oct 2018 21:28:11 +0200 Subject: [PATCH 160/204] x86/kvm/nVMX: introduce source data cache for kvm_init_shadow_ept_mmu() MMU re-initialization is expensive, in particular, update_permission_bitmask() and update_pkru_bitmask() are. Cache the data used to setup shadow EPT MMU and avoid full re-init when it is unchanged. Signed-off-by: Vitaly Kuznetsov Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 14 +++++++++ arch/x86/kvm/mmu.c | 53 +++++++++++++++++++++++---------- 2 files changed, 52 insertions(+), 15 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 51ce635494b0..bf541af7442d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -283,7 +283,21 @@ union kvm_mmu_page_role { }; union kvm_mmu_extended_role { +/* + * This structure complements kvm_mmu_page_role caching everything needed for + * MMU configuration. If nothing in both these structures changed, MMU + * re-configuration can be skipped. @valid bit is set on first usage so we don't + * treat all-zero structure as valid data. 
+ */ u32 word; + struct { + unsigned int valid:1; + unsigned int execonly:1; + unsigned int cr4_pse:1; + unsigned int cr4_pke:1; + unsigned int cr4_smap:1; + unsigned int cr4_smep:1; + }; }; union kvm_mmu_role { diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 3301973527aa..9d400a06c5f2 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4724,6 +4724,20 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu, paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); } +static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) +{ + union kvm_mmu_extended_role ext = {0}; + + ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); + ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); + ext.cr4_pse = !!is_pse(vcpu); + ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); + + ext.valid = 1; + + return ext; +} + static union kvm_mmu_page_role kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu) { @@ -4830,19 +4844,23 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); -static union kvm_mmu_page_role -kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty) +static union kvm_mmu_role +kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, + bool execonly) { - union kvm_mmu_page_role role; + union kvm_mmu_role role; - /* Role is inherited from root_mmu */ - role.word = vcpu->arch.root_mmu.base_role.word; + /* Base role is inherited from root_mmu */ + role.base.word = vcpu->arch.root_mmu.mmu_role.base.word; + role.ext = kvm_calc_mmu_role_ext(vcpu); - role.level = PT64_ROOT_4LEVEL; - role.direct = false; - role.ad_disabled = !accessed_dirty; - role.guest_mode = true; - role.access = ACC_ALL; + role.base.level = PT64_ROOT_4LEVEL; + role.base.direct = false; + role.base.ad_disabled = !accessed_dirty; + role.base.guest_mode = true; + role.base.access = ACC_ALL; + + role.ext.execonly = execonly; return role; } @@ -4851,10 +4869,16 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, bool accessed_dirty, gpa_t new_eptp) { struct kvm_mmu *context = vcpu->arch.mmu; - union kvm_mmu_page_role root_page_role = - kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty); + union kvm_mmu_role new_role = + kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty, + execonly); + + __kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false); + + new_role.base.word &= mmu_base_role_mask.word; + if (new_role.as_u64 == context->mmu_role.as_u64) + return; - __kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role, false); context->shadow_root_level = PT64_ROOT_4LEVEL; context->nx = true; @@ -4866,8 +4890,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, context->update_pte = ept_update_pte; context->root_level = PT64_ROOT_4LEVEL; context->direct_map = false; - context->mmu_role.base.word = - root_page_role.word & mmu_base_role_mask.word; + context->mmu_role.as_u64 = new_role.as_u64; update_permission_bitmask(vcpu, context, true); update_pkru_bitmask(vcpu, context, true); From 7dcd575520082f186231777f4ac9f59ce14d6961 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 8 Oct 2018 21:28:12 +0200 Subject: [PATCH 161/204] x86/kvm/mmu: check if tdp/shadow MMU reconfiguration is needed MMU reconfiguration in init_kvm_tdp_mmu()/kvm_init_shadow_mmu() can be avoided if the source data used to configure it didn't change; enhance MMU extended role with the required fields and consolidate common code in kvm_calc_mmu_role_common(). 
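For illustration, the check-and-skip pattern these patches converge on can be sketched outside the kernel as below; the struct names, fields and the CR0/CR4 encoding are simplified stand-ins for kvm_mmu_role and kvm_calc_mmu_role_common(), not the actual KVM definitions.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Hypothetical stand-in for union kvm_mmu_role: everything that
     * influences the MMU setup is folded into one small structure.
     */
    struct mmu_role {
            uint32_t base;  /* bits that would also tag shadow pages */
            uint32_t ext;   /* extra source data, e.g. CR0/CR4 bits  */
    };

    struct mmu_ctx {
            struct mmu_role cached_role;
            unsigned long reconfig_count;
    };

    static struct mmu_role calc_role(uint32_t cr0, uint32_t cr4)
    {
            /* The low bit of ext acts as the 'valid' flag, so an all-zero
             * cached role never matches a freshly computed one. */
            struct mmu_role role = { .base = cr0 & 0xffff, .ext = cr4 | 1u };

            return role;
    }

    static void init_mmu(struct mmu_ctx *ctx, uint32_t cr0, uint32_t cr4)
    {
            struct mmu_role new_role = calc_role(cr0, cr4);

            /* Source data unchanged: skip the expensive reconfiguration. */
            if (!memcmp(&new_role, &ctx->cached_role, sizeof(new_role)))
                    return;

            ctx->cached_role = new_role;
            ctx->reconfig_count++;  /* stands in for the real re-init work */
    }

    int main(void)
    {
            struct mmu_ctx ctx = { { 0, 0 }, 0 };

            init_mmu(&ctx, 0x80000011, 0x20);       /* first use: reconfigure   */
            init_mmu(&ctx, 0x80000011, 0x20);       /* nothing changed: skipped */
            init_mmu(&ctx, 0x80000011, 0x30);       /* CR4 changed: reconfigure */
            printf("reconfigured %lu times\n", ctx.reconfig_count);
            return 0;
    }

The sketch reconfigures only twice across three calls, which is the saving the patch is after for repeated nested entries with unchanged paging state.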
Signed-off-by: Vitaly Kuznetsov Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 + arch/x86/kvm/mmu.c | 91 +++++++++++++++++++++------------ 2 files changed, 61 insertions(+), 32 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index bf541af7442d..4b09d4aa9bf4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -293,10 +293,12 @@ union kvm_mmu_extended_role { struct { unsigned int valid:1; unsigned int execonly:1; + unsigned int cr0_pg:1; unsigned int cr4_pse:1; unsigned int cr4_pke:1; unsigned int cr4_smap:1; unsigned int cr4_smep:1; + unsigned int cr4_la57:1; }; }; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 9d400a06c5f2..9ed046fbab5e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4728,27 +4728,46 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) { union kvm_mmu_extended_role ext = {0}; + ext.cr0_pg = !!is_paging(vcpu); ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); ext.cr4_pse = !!is_pse(vcpu); ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); + ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); ext.valid = 1; return ext; } -static union kvm_mmu_page_role -kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu) +static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, + bool base_only) { - union kvm_mmu_page_role role = {0}; + union kvm_mmu_role role = {0}; - role.guest_mode = is_guest_mode(vcpu); - role.smm = is_smm(vcpu); - role.ad_disabled = (shadow_accessed_mask == 0); - role.level = kvm_x86_ops->get_tdp_level(vcpu); - role.direct = true; - role.access = ACC_ALL; + role.base.access = ACC_ALL; + role.base.nxe = !!is_nx(vcpu); + role.base.cr4_pae = !!is_pae(vcpu); + role.base.cr0_wp = is_write_protection(vcpu); + role.base.smm = is_smm(vcpu); + role.base.guest_mode = is_guest_mode(vcpu); + + if (base_only) + return role; + + role.ext = kvm_calc_mmu_role_ext(vcpu); + + return role; +} + +static union kvm_mmu_role +kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) +{ + union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only); + + role.base.ad_disabled = (shadow_accessed_mask == 0); + role.base.level = kvm_x86_ops->get_tdp_level(vcpu); + role.base.direct = true; return role; } @@ -4756,9 +4775,14 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu) static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) { struct kvm_mmu *context = vcpu->arch.mmu; + union kvm_mmu_role new_role = + kvm_calc_tdp_mmu_root_page_role(vcpu, false); - context->mmu_role.base.word = mmu_base_role_mask.word & - kvm_calc_tdp_mmu_root_page_role(vcpu).word; + new_role.base.word &= mmu_base_role_mask.word; + if (new_role.as_u64 == context->mmu_role.as_u64) + return; + + context->mmu_role.as_u64 = new_role.as_u64; context->page_fault = tdp_page_fault; context->sync_page = nonpaging_sync_page; context->invlpg = nonpaging_invlpg; @@ -4798,29 +4822,23 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) reset_tdp_shadow_zero_bits_mask(vcpu, context); } -static union kvm_mmu_page_role -kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu) +static union kvm_mmu_role +kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) { - union kvm_mmu_page_role role = {0}; - bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); - bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); + union kvm_mmu_role role = 
kvm_calc_mmu_role_common(vcpu, base_only); - role.nxe = is_nx(vcpu); - role.cr4_pae = !!is_pae(vcpu); - role.cr0_wp = is_write_protection(vcpu); - role.smep_andnot_wp = smep && !is_write_protection(vcpu); - role.smap_andnot_wp = smap && !is_write_protection(vcpu); - role.guest_mode = is_guest_mode(vcpu); - role.smm = is_smm(vcpu); - role.direct = !is_paging(vcpu); - role.access = ACC_ALL; + role.base.smep_andnot_wp = role.ext.cr4_smep && + !is_write_protection(vcpu); + role.base.smap_andnot_wp = role.ext.cr4_smap && + !is_write_protection(vcpu); + role.base.direct = !is_paging(vcpu); if (!is_long_mode(vcpu)) - role.level = PT32E_ROOT_LEVEL; + role.base.level = PT32E_ROOT_LEVEL; else if (is_la57_mode(vcpu)) - role.level = PT64_ROOT_5LEVEL; + role.base.level = PT64_ROOT_5LEVEL; else - role.level = PT64_ROOT_4LEVEL; + role.base.level = PT64_ROOT_4LEVEL; return role; } @@ -4828,6 +4846,12 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu) void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) { struct kvm_mmu *context = vcpu->arch.mmu; + union kvm_mmu_role new_role = + kvm_calc_shadow_mmu_root_page_role(vcpu, false); + + new_role.base.word &= mmu_base_role_mask.word; + if (new_role.as_u64 == context->mmu_role.as_u64) + return; if (!is_paging(vcpu)) nonpaging_init_context(vcpu, context); @@ -4838,8 +4862,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) else paging32_init_context(vcpu, context); - context->mmu_role.base.word = mmu_base_role_mask.word & - kvm_calc_shadow_mmu_root_page_role(vcpu).word; + context->mmu_role.as_u64 = new_role.as_u64; reset_shadow_zero_bits_mask(vcpu, context); } EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); @@ -4977,10 +5000,14 @@ EXPORT_SYMBOL_GPL(kvm_init_mmu); static union kvm_mmu_page_role kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu) { + union kvm_mmu_role role; + if (tdp_enabled) - return kvm_calc_tdp_mmu_root_page_role(vcpu); + role = kvm_calc_tdp_mmu_root_page_role(vcpu, true); else - return kvm_calc_shadow_mmu_root_page_role(vcpu); + role = kvm_calc_shadow_mmu_root_page_role(vcpu, true); + + return role.base; } void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) From bf627a9288372d065f157ea445d11b900575193e Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 8 Oct 2018 21:28:13 +0200 Subject: [PATCH 162/204] x86/kvm/mmu: check if MMU reconfiguration is needed in init_kvm_nested_mmu() We don't use root page role for nested_mmu, however, optimizing out re-initialization in case nothing changed is still valuable as this is done for every nested vmentry. 
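Both this check and the one in the previous patch boil down to a single 64-bit comparison because kvm_mmu_role packs the 32-bit base and extended words side by side. A self-contained sketch of that aliasing follows; the bit-field names are illustrative only, and the static assertions stand in for the BUILD_BUG_ON guards added to kvm_mmu_module_init() earlier in the series.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layouts only; the real unions live in kvm_host.h. */
    union base_role {
            uint32_t word;
            struct {
                    unsigned int level:4;
                    unsigned int direct:1;
                    unsigned int smm:1;
            };
    };

    union ext_role {
            uint32_t word;
            struct {
                    unsigned int valid:1;
                    unsigned int cr4_smep:1;
            };
    };

    union mmu_role {
            uint64_t as_u64;
            struct {
                    union base_role base;
                    union ext_role ext;
            };
    };

    /* The aliasing trick is only safe if the pieces are exactly 32+32 bits. */
    static_assert(sizeof(union base_role) == sizeof(uint32_t), "base role is 32 bits");
    static_assert(sizeof(union ext_role) == sizeof(uint32_t), "extended role is 32 bits");
    static_assert(sizeof(union mmu_role) == sizeof(uint64_t), "combined role is 64 bits");

    int main(void)
    {
            union mmu_role cached = { .as_u64 = 0 };
            union mmu_role next = { .as_u64 = 0 };

            next.base.level = 4;
            next.ext.valid = 1;

            /* One 64-bit compare covers both the base and the extended bits,
             * relying on the compiler-blessed union aliasing the series
             * itself comments on in kvm_mmu_module_init(). */
            printf("reconfigure? %s\n",
                   next.as_u64 == cached.as_u64 ? "no" : "yes");
            return 0;
    }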
Signed-off-by: Vitaly Kuznetsov Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 9ed046fbab5e..c73d9f650de7 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4936,8 +4936,14 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu) static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) { + union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false); struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; + new_role.base.word &= mmu_base_role_mask.word; + if (new_role.as_u64 == g_context->mmu_role.as_u64) + return; + + g_context->mmu_role.as_u64 = new_role.as_u64; g_context->get_cr3 = get_cr3; g_context->get_pdptr = kvm_pdptr_read; g_context->inject_page_fault = kvm_inject_page_fault; From 5ebb272b2ea7e02911a03a893f8d922d49f9bb4a Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Thu, 11 Oct 2018 19:40:43 +0200 Subject: [PATCH 163/204] KVM/x86: Fix invvpid and invept register operand size in 64-bit mode The register operand of the invvpid and invept instructions is always 64 bits wide in 64-bit mode. Adjust the inline function argument type to reflect the correct size. Signed-off-by: Uros Bizjak Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 6f44d3a63434..05a0e8f80f68 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2070,7 +2070,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) return -1; } -static inline void __invvpid(int ext, u16 vpid, gva_t gva) +static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva) { struct { u64 vpid : 16; @@ -2085,7 +2085,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva) BUG_ON(error); } -static inline void __invept(int ext, u64 eptp, gpa_t gpa) +static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa) { struct { u64 eptp, gpa; From 4b1e54786e4862d3110bbfb27999c2c795013007 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Thu, 11 Oct 2018 19:40:44 +0200 Subject: [PATCH 164/204] KVM/x86: Use assembly instruction mnemonics instead of .byte streams Recently the minimum required version of binutils was changed to 2.20, which supports all VMX instruction mnemonics. The patch removes all .byte #defines and uses real instruction mnemonics instead. The compiler is now able to pass a memory operand to the instruction, so there is no need for a memory clobber anymore. Also, the compiler adds the CC register clobber automatically to all extended asm clauses, so the patch also removes the explicit CC clobber.
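The effect of the constraint change can be reproduced in a small x86-64 userspace experiment. The snippet below is only a sketch: an ordinary load stands in for the privileged VMX instructions, and the struct and helper names are made up.

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up stand-in for the invvpid/invept operand descriptor. */
    struct operand {
            uint64_t vpid;
            uint64_t gva;
    };

    /* Old pattern: the operand's address is forced into a register and a
     * blanket "memory" clobber is needed because the compiler cannot see
     * what the instruction reads. */
    static uint64_t load_old(const struct operand *op)
    {
            uint64_t val;

            asm volatile ("movq (%1), %0" : "=r"(val) : "r"(op) : "memory");
            return val;
    }

    /* New pattern: a real memory operand lets the compiler choose the
     * addressing mode and drop the clobber. */
    static uint64_t load_new(const struct operand *op)
    {
            uint64_t val;

            asm ("movq %1, %0" : "=r"(val) : "m"(op->vpid));
            return val;
    }

    int main(void)
    {
            struct operand op = { .vpid = 1, .gva = 0 };

            printf("%llu %llu\n",
                   (unsigned long long)load_old(&op),
                   (unsigned long long)load_new(&op));
            return 0;
    }

Comparing the -O2 assembly of the two helpers shows the second one addressing the descriptor directly rather than first materialising its address in a register, which is roughly the effect quantified by the size numbers below.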
The immediate benefit of the patch is removal of many unnecesary register moves, resulting in 1434 saved bytes in vmx.o: text data bss dec hex filename 151257 18246 8500 178003 2b753 vmx.o 152691 18246 8500 179437 2bced vmx-old.o Some examples of improvement include removal of unneeded moves of %rsp to %rax in front of invept and invvpid instructions: a57e: b9 01 00 00 00 mov $0x1,%ecx a583: 48 89 04 24 mov %rax,(%rsp) a587: 48 89 e0 mov %rsp,%rax a58a: 48 c7 44 24 08 00 00 movq $0x0,0x8(%rsp) a591: 00 00 a593: 66 0f 38 80 08 invept (%rax),%rcx to: a45c: 48 89 04 24 mov %rax,(%rsp) a460: b8 01 00 00 00 mov $0x1,%eax a465: 48 c7 44 24 08 00 00 movq $0x0,0x8(%rsp) a46c: 00 00 a46e: 66 0f 38 80 04 24 invept (%rsp),%rax and the ability to use more optimal registers and memory operands in the instruction: 8faa: 48 8b 44 24 28 mov 0x28(%rsp),%rax 8faf: 4c 89 c2 mov %r8,%rdx 8fb2: 0f 79 d0 vmwrite %rax,%rdx to: 8e7c: 44 0f 79 44 24 28 vmwrite 0x28(%rsp),%r8 Signed-off-by: Uros Bizjak Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/virtext.h | 2 +- arch/x86/include/asm/vmx.h | 13 ---------- arch/x86/kvm/vmx.c | 46 +++++++++++++++------------------- 3 files changed, 21 insertions(+), 40 deletions(-) diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 0116b2ee9e64..449c92da2c91 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -40,7 +40,7 @@ static inline int cpu_has_vmx(void) */ static inline void cpu_vmxoff(void) { - asm volatile (ASM_VMX_VMXOFF : : : "cc"); + asm volatile ("vmxoff"); cr4_clear_bits(X86_CR4_VMXE); } diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 9527ba5d62da..ade0f153947d 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -503,19 +503,6 @@ enum vmcs_field { #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul - -#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30" -#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2" -#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3" -#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30" -#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0" -#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0" -#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" -#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" -#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" -#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08" -#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08" - struct vmx_msr_entry { u32 index; u32 reserved; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 05a0e8f80f68..c1b9bd15815c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2079,9 +2079,8 @@ static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva) } operand = { vpid, 0, gva }; bool error; - asm volatile (__ex(ASM_VMX_INVVPID) CC_SET(na) - : CC_OUT(na) (error) : "a"(&operand), "c"(ext) - : "memory"); + asm volatile (__ex("invvpid %2, %1") CC_SET(na) + : CC_OUT(na) (error) : "r"(ext), "m"(operand)); BUG_ON(error); } @@ -2092,9 +2091,8 @@ static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa) } operand = {eptp, gpa}; bool error; - asm volatile (__ex(ASM_VMX_INVEPT) CC_SET(na) - : CC_OUT(na) (error) : "a" (&operand), "c" (ext) - : "memory"); + asm volatile (__ex("invept %2, %1") CC_SET(na) + : CC_OUT(na) (error) : "r"(ext), "m"(operand)); BUG_ON(error); } @@ -2113,9 +2111,8 @@ static void vmcs_clear(struct vmcs *vmcs) u64 phys_addr = __pa(vmcs); bool error; - asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) 
CC_SET(na) - : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr) - : "memory"); + asm volatile (__ex("vmclear %1") CC_SET(na) + : CC_OUT(na) (error) : "m"(phys_addr)); if (unlikely(error)) printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", vmcs, phys_addr); @@ -2138,9 +2135,8 @@ static void vmcs_load(struct vmcs *vmcs) if (static_branch_unlikely(&enable_evmcs)) return evmcs_load(phys_addr); - asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) CC_SET(na) - : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr) - : "memory"); + asm volatile (__ex("vmptrld %1") CC_SET(na) + : CC_OUT(na) (error) : "m"(phys_addr)); if (unlikely(error)) printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n", vmcs, phys_addr); @@ -2316,8 +2312,8 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field) { unsigned long value; - asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0") - : "=a"(value) : "d"(field) : "cc"); + asm volatile (__ex_clear("vmread %1, %0", "%0") + : "=r"(value) : "r"(field)); return value; } @@ -2368,8 +2364,8 @@ static __always_inline void __vmcs_writel(unsigned long field, unsigned long val { bool error; - asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) CC_SET(na) - : CC_OUT(na) (error) : "a"(value), "d"(field)); + asm volatile (__ex("vmwrite %2, %1") CC_SET(na) + : CC_OUT(na) (error) : "r"(field), "rm"(value)); if (unlikely(error)) vmwrite_error(field, value); } @@ -4396,9 +4392,7 @@ static void kvm_cpu_vmxon(u64 addr) cr4_set_bits(X86_CR4_VMXE); intel_pt_handle_vmx(1); - asm volatile (ASM_VMX_VMXON_RAX - : : "a"(&addr), "m"(addr) - : "memory", "cc"); + asm volatile ("vmxon %0" : : "m"(addr)); } static int hardware_enable(void) @@ -4467,7 +4461,7 @@ static void vmclear_local_loaded_vmcss(void) */ static void kvm_cpu_vmxoff(void) { - asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); + asm volatile (__ex("vmxoff")); intel_pt_handle_vmx(0); cr4_clear_bits(X86_CR4_VMXE); @@ -10712,7 +10706,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t" "jmp 1f \n\t" "2: \n\t" - __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" + __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t" "1: \n\t" /* Reload cr2 if changed */ "mov %c[cr2](%0), %%" _ASM_AX " \n\t" @@ -10744,9 +10738,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) /* Enter guest mode */ "jne 1f \n\t" - __ex(ASM_VMX_VMLAUNCH) "\n\t" + __ex("vmlaunch") "\n\t" "jmp 2f \n\t" - "1: " __ex(ASM_VMX_VMRESUME) "\n\t" + "1: " __ex("vmresume") "\n\t" "2: " /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" @@ -12702,15 +12696,15 @@ static int __noclone nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) asm( /* Set HOST_RSP */ - __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" + __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t" "mov %%" _ASM_SP ", %c[host_rsp](%0)\n\t" /* Check if vmlaunch of vmresume is needed */ "cmpl $0, %c[launched](%0)\n\t" "je 1f\n\t" - __ex(ASM_VMX_VMRESUME) "\n\t" + __ex("vmresume") "\n\t" "jmp 2f\n\t" - "1: " __ex(ASM_VMX_VMLAUNCH) "\n\t" + "1: " __ex("vmlaunch") "\n\t" "jmp 2f\n\t" "2: " From 44c2d667ce3fa6933ac30a726f148f0c6d84ce5f Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Thu, 11 Oct 2018 19:40:45 +0200 Subject: [PATCH 165/204] KVM/x86: Use 32bit xor to clear register x86_64 zero-extends 32bit xor to a full 64bit register. 
Use %k asm operand modifier to force 32bit register and save 268 bytes in kvm.o Signed-off-by: Uros Bizjak Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c1b9bd15815c..0050fe81d857 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -61,7 +61,7 @@ #define __ex(x) __kvm_handle_fault_on_reboot(x) #define __ex_clear(x, reg) \ - ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg) + ____kvm_handle_fault_on_reboot(x, "xorl " reg " , " reg) MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); @@ -2312,7 +2312,7 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field) { unsigned long value; - asm volatile (__ex_clear("vmread %1, %0", "%0") + asm volatile (__ex_clear("vmread %1, %0", "%k0") : "=r"(value) : "r"(field)); return value; } From a5c214dad1985092bd21c38c2a78befed9b02294 Mon Sep 17 00:00:00 2001 From: Lan Tianyu Date: Sat, 13 Oct 2018 22:54:05 +0800 Subject: [PATCH 166/204] KVM/VMX: Change hv flush logic when ept tables are mismatched. If ept table pointers are mismatched, flushing tlb for each vcpus via hv flush interface still helps to reduce vmexits which are triggered by IPI and INEPT emulation. Signed-off-by: Lan Tianyu Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 0050fe81d857..a9ed7a723a0c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1571,7 +1571,8 @@ static void check_ept_pointer_match(struct kvm *kvm) static int vmx_hv_remote_flush_tlb(struct kvm *kvm) { - int ret; + struct kvm_vcpu *vcpu; + int ret = -ENOTSUPP, i; spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); @@ -1579,14 +1580,14 @@ static int vmx_hv_remote_flush_tlb(struct kvm *kvm) check_ept_pointer_match(kvm); if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) { - ret = -ENOTSUPP; - goto out; + kvm_for_each_vcpu(i, vcpu, kvm) + ret |= hyperv_flush_guest_mapping( + to_vmx(kvm_get_vcpu(kvm, i))->ept_pointer); + } else { + ret = hyperv_flush_guest_mapping( + to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer); } - ret = hyperv_flush_guest_mapping( - to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer); - -out: spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); return ret; } From 39337ad1a783b8f2d3a708d62dc094dc5d8c9f29 Mon Sep 17 00:00:00 2001 From: Peng Hao Date: Thu, 4 Oct 2018 11:45:00 -0400 Subject: [PATCH 167/204] kvm/x86 : fix some typo Signed-off-by: Peng Hao Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index c73d9f650de7..dc60fcddcf96 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4563,7 +4563,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu, * SMAP:kernel-mode data accesses from user-mode * mappings should fault. A fault is considered * as a SMAP violation if all of the following - * conditions are ture: + * conditions are true: * - X86_CR4_SMAP is set in CR4 * - A user page is accessed * - The access is not a fetch @@ -5978,7 +5978,7 @@ int kvm_mmu_module_init(void) } /* - * Caculate mmu pages needed for kvm. + * Calculate mmu pages needed for kvm. 
*/ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) { From 9943450b7b8831c5045362eed45f6fefd1986d72 Mon Sep 17 00:00:00 2001 From: Peng Hao Date: Sun, 14 Oct 2018 07:09:56 +0800 Subject: [PATCH 168/204] kvm/x86 : add document for coalesced mmio Signed-off-by: Peng Hao Signed-off-by: Paolo Bonzini --- Documentation/virtual/kvm/api.txt | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 48e5d1197295..10d48eb67da9 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -3681,6 +3681,30 @@ Returns: 0 on success, -1 on error This copies the vcpu's kvm_nested_state struct from userspace to the kernel. For the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE. +4.116 KVM_(UN)REGISTER_COALESCED_MMIO + +Capability: KVM_CAP_COALESCED_MMIO +Architectures: all +Type: vm ioctl +Parameters: struct kvm_coalesced_mmio_zone +Returns: 0 on success, < 0 on error + +Coalesced mmio is a performance optimization that defers hardware +register write emulation so that userspace exits are avoided. It is +typically used to reduce the overhead of emulating frequently accessed +hardware registers. + +When a hardware register is configured for coalesced mmio, write accesses +do not exit to userspace and their value is recorded in a ring buffer +that is shared between kernel and userspace. + +Coalesced mmio is used if one or more write accesses to a hardware +register can be deferred until a read or a write to another hardware +register on the same device. This last access will cause a vmexit and +userspace will process accesses from the ring buffer before emulating +it. That will avoid exiting to userspace on repeated writes to the +first register. + 5. The kvm_run structure ------------------------ From 0804c849f1df0992d39a37c4fc259f7f8b16f385 Mon Sep 17 00:00:00 2001 From: Peng Hao Date: Sun, 14 Oct 2018 07:09:55 +0800 Subject: [PATCH 169/204] kvm/x86 : add coalesced pio support Coalesced pio is based on coalesced mmio and can be used for some port like rtc port, pci-host config port and so on. Specially in case of rtc as coalesced pio, some versions of windows guest access rtc frequently because of rtc as system tick. guest access rtc like this: write register index to 0x70, then write or read data from 0x71. writing 0x70 port is just as index and do nothing else. So we can use coalesced pio to handle this scene to reduce VM-EXIT time. When starting and closing a virtual machine, it will access pci-host config port frequently. So setting these port as coalesced pio can reduce startup and shutdown time. 
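As a userspace illustration (a sketch only: it assumes the updated uapi header from this patch and an already created VM fd, and the function name is made up), registering the RTC index port as a coalesced pio zone could look like:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int register_rtc_coalesced_pio(int vm_fd)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = 0x70,	/* I/O port number, not a guest-physical address */
		.size = 1,	/* the RTC index port is one byte wide */
		.pio  = 1,	/* new field: route the zone to KVM_PIO_BUS */
	};

	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_PIO) <= 0) {
		fprintf(stderr, "coalesced pio not supported by this kernel\n");
		return -1;
	}

	/* The existing KVM_REGISTER_COALESCED_MMIO vm ioctl is reused;
	 * only zone.pio distinguishes a pio zone from an mmio zone. */
	return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
}

Writes to the registered port are then recorded in the shared ring and drained by userspace on the next exit, as described in the api.txt update below.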
without my patch, get the vm-exit time of accessing rtc 0x70 and piix 0xcf8 using perf tools: (guest OS : windows 7 64bit) IO Port Access Samples Samples% Time% Min Time Max Time Avg time 0x70:POUT 86 30.99% 74.59% 9us 29us 10.75us (+- 3.41%) 0xcf8:POUT 1119 2.60% 2.12% 2.79us 56.83us 3.41us (+- 2.23%) with my patch IO Port Access Samples Samples% Time% Min Time Max Time Avg time 0x70:POUT 106 32.02% 29.47% 0us 10us 1.57us (+- 7.38%) 0xcf8:POUT 1065 1.67% 0.28% 0.41us 65.44us 0.66us (+- 10.55%) Signed-off-by: Peng Hao Signed-off-by: Paolo Bonzini --- Documentation/virtual/kvm/api.txt | 16 ++++++++++------ include/uapi/linux/kvm.h | 11 +++++++++-- virt/kvm/coalesced_mmio.c | 12 +++++++++--- virt/kvm/kvm_main.c | 2 ++ 4 files changed, 30 insertions(+), 11 deletions(-) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 10d48eb67da9..70f9c8bb1840 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -3683,27 +3683,31 @@ the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE. 4.116 KVM_(UN)REGISTER_COALESCED_MMIO -Capability: KVM_CAP_COALESCED_MMIO +Capability: KVM_CAP_COALESCED_MMIO (for coalesced mmio) + KVM_CAP_COALESCED_PIO (for coalesced pio) Architectures: all Type: vm ioctl Parameters: struct kvm_coalesced_mmio_zone Returns: 0 on success, < 0 on error -Coalesced mmio is a performance optimization that defers hardware +Coalesced I/O is a performance optimization that defers hardware register write emulation so that userspace exits are avoided. It is typically used to reduce the overhead of emulating frequently accessed hardware registers. -When a hardware register is configured for coalesced mmio, write accesses +When a hardware register is configured for coalesced I/O, write accesses do not exit to userspace and their value is recorded in a ring buffer that is shared between kernel and userspace. -Coalesced mmio is used if one or more write accesses to a hardware +Coalesced I/O is used if one or more write accesses to a hardware register can be deferred until a read or a write to another hardware register on the same device. This last access will cause a vmexit and userspace will process accesses from the ring buffer before emulating -it. That will avoid exiting to userspace on repeated writes to the -first register. +it. That will avoid exiting to userspace on repeated writes. + +Coalesced pio is based on coalesced mmio. There is little difference +between coalesced mmio and pio except that coalesced pio records accesses +to I/O ports. 5. 
The kvm_run structure ------------------------ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 7785678caedb..97780a0277fe 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -420,13 +420,19 @@ struct kvm_run { struct kvm_coalesced_mmio_zone { __u64 addr; __u32 size; - __u32 pad; + union { + __u32 pad; + __u32 pio; + }; }; struct kvm_coalesced_mmio { __u64 phys_addr; __u32 len; - __u32 pad; + union { + __u32 pad; + __u32 pio; + }; __u8 data[8]; }; @@ -956,6 +962,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_MSR_PLATFORM_INFO 159 #define KVM_CAP_PPC_NESTED_HV 160 #define KVM_CAP_HYPERV_SEND_IPI 161 +#define KVM_CAP_COALESCED_PIO 162 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c index 9e65feb6fa58..3710342cf6ad 100644 --- a/virt/kvm/coalesced_mmio.c +++ b/virt/kvm/coalesced_mmio.c @@ -83,6 +83,7 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu, ring->coalesced_mmio[ring->last].phys_addr = addr; ring->coalesced_mmio[ring->last].len = len; memcpy(ring->coalesced_mmio[ring->last].data, val, len); + ring->coalesced_mmio[ring->last].pio = dev->zone.pio; smp_wmb(); ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX; spin_unlock(&dev->kvm->ring_lock); @@ -140,6 +141,9 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, int ret; struct kvm_coalesced_mmio_dev *dev; + if (zone->pio != 1 && zone->pio != 0) + return -EINVAL; + dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL); if (!dev) return -ENOMEM; @@ -149,8 +153,9 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, dev->zone = *zone; mutex_lock(&kvm->slots_lock); - ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr, - zone->size, &dev->dev); + ret = kvm_io_bus_register_dev(kvm, + zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, + zone->addr, zone->size, &dev->dev); if (ret < 0) goto out_free_dev; list_add_tail(&dev->list, &kvm->coalesced_zones); @@ -174,7 +179,8 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) { - kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev); + kvm_io_bus_unregister_dev(kvm, + zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev); kvm_iodevice_destructor(&dev->dev); } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index acc951cc2663..067b71abae00 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2949,6 +2949,8 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) #ifdef CONFIG_KVM_MMIO case KVM_CAP_COALESCED_MMIO: return KVM_COALESCED_MMIO_PAGE_OFFSET; + case KVM_CAP_COALESCED_PIO: + return 1; #endif #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING case KVM_CAP_IRQ_ROUTING: From 8daf346226b22e565baf27ccaec58fed9e108b42 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Thu, 4 Oct 2018 10:04:22 +0800 Subject: [PATCH 170/204] KVM: x86: rename pte_list_remove to __pte_list_remove This is a patch preparing for further change. 
Signed-off-by: Wei Yang Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index dc60fcddcf96..175bfb6766db 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1265,24 +1265,24 @@ pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, mmu_free_pte_list_desc(desc); } -static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) +static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) { struct pte_list_desc *desc; struct pte_list_desc *prev_desc; int i; if (!rmap_head->val) { - printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte); + pr_err("%s: %p 0->BUG\n", __func__, spte); BUG(); } else if (!(rmap_head->val & 1)) { - rmap_printk("pte_list_remove: %p 1->0\n", spte); + rmap_printk("%s: %p 1->0\n", __func__, spte); if ((u64 *)rmap_head->val != spte) { - printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte); + pr_err("%s: %p 1->BUG\n", __func__, spte); BUG(); } rmap_head->val = 0; } else { - rmap_printk("pte_list_remove: %p many->many\n", spte); + rmap_printk("%s: %p many->many\n", __func__, spte); desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); prev_desc = NULL; while (desc) { @@ -1296,7 +1296,7 @@ static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) prev_desc = desc; desc = desc->more; } - pr_err("pte_list_remove: %p many->many\n", spte); + pr_err("%s: %p many->many\n", __func__, spte); BUG(); } } @@ -1349,7 +1349,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) sp = page_header(__pa(spte)); gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); rmap_head = gfn_to_rmap(kvm, gfn, sp); - pte_list_remove(spte, rmap_head); + __pte_list_remove(spte, rmap_head); } /* @@ -1988,7 +1988,7 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, u64 *parent_pte) { - pte_list_remove(parent_pte, &sp->parent_ptes); + __pte_list_remove(parent_pte, &sp->parent_ptes); } static void drop_parent_pte(struct kvm_mmu_page *sp, From e7912386ede89d2aa1f72ebb74969e48e9d5f56c Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Thu, 4 Oct 2018 10:04:23 +0800 Subject: [PATCH 171/204] KVM: x86: reintroduce pte_list_remove, but including mmu_spte_clear_track_bits rmap_remove() removes the sptep after locating the correct rmap_head but, in several cases, the caller has already known the correct rmap_head. This patch introduces a new pte_list_remove(); because it is known that the spte is present (or it would not have an rmap_head), it is safe to remove the tracking bits without any previous check. 
Signed-off-by: Wei Yang Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 175bfb6766db..4cf43ce42959 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1301,6 +1301,12 @@ static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) } } +static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep) +{ + mmu_spte_clear_track_bits(sptep); + __pte_list_remove(sptep, rmap_head); +} + static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level, struct kvm_memory_slot *slot) { @@ -1685,7 +1691,7 @@ static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head) while ((sptep = rmap_get_first(rmap_head, &iter))) { rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep); - drop_spte(kvm, sptep); + pte_list_remove(rmap_head, sptep); flush = true; } @@ -1721,7 +1727,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, need_flush = 1; if (pte_write(*ptep)) { - drop_spte(kvm, sptep); + pte_list_remove(rmap_head, sptep); goto restart; } else { new_spte = *sptep & ~PT64_BASE_ADDR_MASK; @@ -5682,7 +5688,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, if (sp->role.direct && !kvm_is_reserved_pfn(pfn) && PageTransCompoundMap(pfn_to_page(pfn))) { - drop_spte(kvm, sptep); + pte_list_remove(rmap_head, sptep); need_tlb_flush = 1; goto restart; } From 970c0d4b94efcba5610b37f0118cb1a2286ba962 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Tue, 9 Oct 2018 10:41:15 +0800 Subject: [PATCH 172/204] KVM: refine the comment of function gfn_to_hva_memslot_prot() The original comment is little hard to understand. No functional change, just amend the comment a little. Signed-off-by: Wei Yang Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 067b71abae00..786ade1843a2 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1314,8 +1314,12 @@ unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); /* - * If writable is set to false, the hva returned by this function is only - * allowed to be read. + * Return the hva of a @gfn and the R/W attribute if possible. + * + * @slot: the kvm_memory_slot which contains @gfn + * @gfn: the gfn to be translated + * @writable: used to return the read/write attribute of the @slot if the hva + * is valid and @writable is not NULL */ unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, bool *writable) From 72bbf9358c3676bd89dc4bd8fb0b1f2a11c288fc Mon Sep 17 00:00:00 2001 From: Ladi Prosek Date: Tue, 16 Oct 2018 18:49:59 +0200 Subject: [PATCH 173/204] KVM: hyperv: define VP assist page helpers The state related to the VP assist page is still managed by the LAPIC code in the pv_eoi field. 
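As a rough illustration of how these helpers are meant to be consumed (a hypothetical in-kernel caller, not part of this patch, which relies on the hv_vp_assist_page layout from asm/hyperv-tlfs.h):

#include <linux/kvm_host.h>
#include <asm/hyperv-tlfs.h>
#include "hyperv.h"

static bool guest_enabled_evmcs(struct kvm_vcpu *vcpu)
{
	struct hv_vp_assist_page assist_page;

	/* Fails if the guest has not enabled the VP assist page or if the
	 * cached translation of the page cannot be read. */
	if (!kvm_hv_get_assist_page(vcpu, &assist_page))
		return false;

	return assist_page.enlighten_vmentry != 0;
}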
Signed-off-by: Ladi Prosek Signed-off-by: Vitaly Kuznetsov Reviewed-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 23 +++++++++++++++++++++-- arch/x86/kvm/hyperv.h | 4 ++++ arch/x86/kvm/lapic.c | 4 ++-- arch/x86/kvm/lapic.h | 2 +- arch/x86/kvm/x86.c | 2 +- 5 files changed, 29 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index df9ef8f1d2c8..d9b7b4cd4e43 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -693,6 +693,24 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) stimer_cleanup(&hv_vcpu->stimer[i]); } +bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu) +{ + if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) + return false; + return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; +} +EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled); + +bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu, + struct hv_vp_assist_page *assist_page) +{ + if (!kvm_hv_assist_page_enabled(vcpu)) + return false; + return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, + assist_page, sizeof(*assist_page)); +} +EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page); + static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer) { struct hv_message *msg = &stimer->msg; @@ -1078,7 +1096,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) { hv_vcpu->hv_vapic = data; - if (kvm_lapic_enable_pv_eoi(vcpu, 0)) + if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0)) return 1; break; } @@ -1091,7 +1109,8 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) hv_vcpu->hv_vapic = data; kvm_vcpu_mark_page_dirty(vcpu, gfn); if (kvm_lapic_enable_pv_eoi(vcpu, - gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) + gfn_to_gpa(gfn) | KVM_MSR_ENABLED, + sizeof(struct hv_vp_assist_page))) return 1; break; } diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index d6aa969e20f1..0e66c12ed2c3 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -62,6 +62,10 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu); void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu); +bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu); +bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu, + struct hv_vp_assist_page *assist_page); + static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu, int timer_index) { diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 452eed992aea..79358fd6a71c 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2644,7 +2644,7 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) return 0; } -int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) +int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len) { u64 addr = data & ~KVM_MSR_ENABLED; if (!IS_ALIGNED(addr, 4)) @@ -2654,7 +2654,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) if (!pv_eoi_enabled(vcpu)) return 0; return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, - addr, sizeof(u8)); + addr, len); } void kvm_apic_accept_events(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index ed0ed39abd36..ff6ef9c3d760 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -120,7 +120,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu) return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; } -int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 
data); +int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len); void kvm_lapic_init(void); void kvm_lapic_exit(void); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2750270b7a39..eee871ad4ade 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2477,7 +2477,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_KVM_PV_EOI_EN: - if (kvm_lapic_enable_pv_eoi(vcpu, data)) + if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8))) return 1; break; From 5d7a6443368e48ca041fea086e345420c027a8cd Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 16 Oct 2018 18:50:00 +0200 Subject: [PATCH 174/204] KVM: VMX: refactor evmcs_sanitize_exec_ctrls() Split off EVMCS1_UNSUPPORTED_* macros so we can re-use them when enabling Enlightened VMCS for Hyper-V on KVM. Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 108 ++++++++++++++++++++------------------------- 1 file changed, 47 insertions(+), 61 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a9ed7a723a0c..509d4e34dd62 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1392,6 +1392,49 @@ DEFINE_STATIC_KEY_FALSE(enable_evmcs); #define KVM_EVMCS_VERSION 1 +/* + * Enlightened VMCSv1 doesn't support these: + * + * POSTED_INTR_NV = 0x00000002, + * GUEST_INTR_STATUS = 0x00000810, + * APIC_ACCESS_ADDR = 0x00002014, + * POSTED_INTR_DESC_ADDR = 0x00002016, + * EOI_EXIT_BITMAP0 = 0x0000201c, + * EOI_EXIT_BITMAP1 = 0x0000201e, + * EOI_EXIT_BITMAP2 = 0x00002020, + * EOI_EXIT_BITMAP3 = 0x00002022, + * GUEST_PML_INDEX = 0x00000812, + * PML_ADDRESS = 0x0000200e, + * VM_FUNCTION_CONTROL = 0x00002018, + * EPTP_LIST_ADDRESS = 0x00002024, + * VMREAD_BITMAP = 0x00002026, + * VMWRITE_BITMAP = 0x00002028, + * + * TSC_MULTIPLIER = 0x00002032, + * PLE_GAP = 0x00004020, + * PLE_WINDOW = 0x00004022, + * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, + * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808, + * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04, + * + * Currently unsupported in KVM: + * GUEST_IA32_RTIT_CTL = 0x00002814, + */ +#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \ + PIN_BASED_VMX_PREEMPTION_TIMER) +#define EVMCS1_UNSUPPORTED_2NDEXEC \ + (SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \ + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | \ + SECONDARY_EXEC_APIC_REGISTER_VIRT | \ + SECONDARY_EXEC_ENABLE_PML | \ + SECONDARY_EXEC_ENABLE_VMFUNC | \ + SECONDARY_EXEC_SHADOW_VMCS | \ + SECONDARY_EXEC_TSC_SCALING | \ + SECONDARY_EXEC_PAUSE_LOOP_EXITING) +#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) +#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) +#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING) + #if IS_ENABLED(CONFIG_HYPERV) static bool __read_mostly enlightened_vmcs = true; module_param(enlightened_vmcs, bool, 0444); @@ -1484,69 +1527,12 @@ static void evmcs_load(u64 phys_addr) static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) { - /* - * Enlightened VMCSv1 doesn't support these: - * - * POSTED_INTR_NV = 0x00000002, - * GUEST_INTR_STATUS = 0x00000810, - * APIC_ACCESS_ADDR = 0x00002014, - * POSTED_INTR_DESC_ADDR = 0x00002016, - * EOI_EXIT_BITMAP0 = 0x0000201c, - * EOI_EXIT_BITMAP1 = 0x0000201e, - * EOI_EXIT_BITMAP2 = 0x00002020, - * EOI_EXIT_BITMAP3 = 0x00002022, - */ - vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; - vmcs_conf->cpu_based_2nd_exec_ctrl &= - ~SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; - vmcs_conf->cpu_based_2nd_exec_ctrl &= - 
~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; - vmcs_conf->cpu_based_2nd_exec_ctrl &= - ~SECONDARY_EXEC_APIC_REGISTER_VIRT; + vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL; + vmcs_conf->cpu_based_2nd_exec_ctrl &= ~EVMCS1_UNSUPPORTED_2NDEXEC; - /* - * GUEST_PML_INDEX = 0x00000812, - * PML_ADDRESS = 0x0000200e, - */ - vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_PML; + vmcs_conf->vmexit_ctrl &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; + vmcs_conf->vmentry_ctrl &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; - /* VM_FUNCTION_CONTROL = 0x00002018, */ - vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_VMFUNC; - - /* - * EPTP_LIST_ADDRESS = 0x00002024, - * VMREAD_BITMAP = 0x00002026, - * VMWRITE_BITMAP = 0x00002028, - */ - vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_SHADOW_VMCS; - - /* - * TSC_MULTIPLIER = 0x00002032, - */ - vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_TSC_SCALING; - - /* - * PLE_GAP = 0x00004020, - * PLE_WINDOW = 0x00004022, - */ - vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; - - /* - * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, - */ - vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER; - - /* - * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808, - * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04, - */ - vmcs_conf->vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; - vmcs_conf->vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; - - /* - * Currently unsupported in KVM: - * GUEST_IA32_RTIT_CTL = 0x00002814, - */ } /* check_ept_pointer() should be under protection of ept_pointer_lock. */ From 57b119da3594f5145a64fdebe0ac9ee0cc65f371 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 16 Oct 2018 18:50:01 +0200 Subject: [PATCH 175/204] KVM: nVMX: add KVM_CAP_HYPERV_ENLIGHTENED_VMCS capability Enlightened VMCS is opt-in. The current version does not contain all fields supported by nested VMX so we must not advertise the corresponding VMX features if enlightened VMCS is enabled. Userspace is given the enlightened VMCS version supported by KVM as part of enabling KVM_CAP_HYPERV_ENLIGHTENED_VMCS. The version is to be advertised to the nested hypervisor, currently done via a cpuid leaf for Hyper-V. 
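For illustration, a minimal userspace sketch (assuming the new uapi constant; the function name is invented and error handling is elided) that enables the capability on a vCPU and fetches the supported version range:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int enable_evmcs(int vcpu_fd, uint16_t *vmcs_version)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
		/* KVM copies the version range here: low byte is the minimal
		 * supported eVMCS version, high byte the maximum. */
		.args[0] = (uint64_t)(uintptr_t)vmcs_version,
	};

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}

On success the VMM surfaces the returned range to L1 through the Hyper-V CPUID leaves.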
Suggested-by: Ladi Prosek Signed-off-by: Vitaly Kuznetsov Reviewed-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 3 +++ arch/x86/kvm/svm.c | 9 ++++++++ arch/x86/kvm/vmx.c | 37 +++++++++++++++++++++++++++++++++ arch/x86/kvm/x86.c | 15 +++++++++++++ include/uapi/linux/kvm.h | 1 + 5 files changed, 65 insertions(+) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4b09d4aa9bf4..258fc2c85301 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1179,6 +1179,9 @@ struct kvm_x86_ops { int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); int (*get_msr_feature)(struct kvm_msr_entry *entry); + + int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu, + uint16_t *vmcs_version); }; struct kvm_arch_async_pf { diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 2936c63bcc2f..47b07211c5b1 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -7036,6 +7036,13 @@ static int svm_unregister_enc_region(struct kvm *kvm, return ret; } +static int nested_enable_evmcs(struct kvm_vcpu *vcpu, + uint16_t *vmcs_version) +{ + /* Intel-only feature */ + return -ENODEV; +} + static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, @@ -7165,6 +7172,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .mem_enc_op = svm_mem_enc_op, .mem_enc_reg_region = svm_register_enc_region, .mem_enc_unreg_region = svm_unregister_enc_region, + + .nested_enable_evmcs = nested_enable_evmcs, }; static int __init svm_init(void) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 509d4e34dd62..459cdaa0d1cd 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -846,6 +846,13 @@ struct nested_vmx { bool change_vmcs01_virtual_apic_mode; + /* + * Enlightened VMCS has been enabled. It does not mean that L1 has to + * use it. However, VMX features available to L1 will be limited based + * on what the enlightened VMCS supports. + */ + bool enlightened_vmcs_enabled; + /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; @@ -1589,6 +1596,34 @@ static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {} static inline void evmcs_touch_msr_bitmap(void) {} #endif /* IS_ENABLED(CONFIG_HYPERV) */ +static int nested_enable_evmcs(struct kvm_vcpu *vcpu, + uint16_t *vmcs_version) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* We don't support disabling the feature for simplicity. */ + if (vmx->nested.enlightened_vmcs_enabled) + return 0; + + vmx->nested.enlightened_vmcs_enabled = true; + + /* + * vmcs_version represents the range of supported Enlightened VMCS + * versions: lower 8 bits is the minimal version, higher 8 bits is the + * maximum supported version. KVM supports versions from 1 to + * KVM_EVMCS_VERSION. 
+ */ + *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1; + + vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; + vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; + vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; + vmx->nested.msrs.secondary_ctls_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC; + vmx->nested.msrs.vmfunc_controls &= ~EVMCS1_UNSUPPORTED_VMFUNC; + + return 0; +} + static inline bool is_exception_n(u32 intr_info, u8 vector) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | @@ -14505,6 +14540,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .pre_enter_smm = vmx_pre_enter_smm, .pre_leave_smm = vmx_pre_leave_smm, .enable_smi_window = enable_smi_window, + + .nested_enable_evmcs = nested_enable_evmcs, }; static void vmx_cleanup_l1d_flush(void) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index eee871ad4ade..50f308499ce5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2913,6 +2913,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_HYPERV_EVENTFD: case KVM_CAP_HYPERV_TLBFLUSH: case KVM_CAP_HYPERV_SEND_IPI: + case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: @@ -3700,6 +3701,10 @@ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, struct kvm_enable_cap *cap) { + int r; + uint16_t vmcs_version; + void __user *user_ptr; + if (cap->flags) return -EINVAL; @@ -3712,6 +3717,16 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, return -EINVAL; return kvm_hv_activate_synic(vcpu, cap->cap == KVM_CAP_HYPERV_SYNIC2); + case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: + r = kvm_x86_ops->nested_enable_evmcs(vcpu, &vmcs_version); + if (!r) { + user_ptr = (void __user *)(uintptr_t)cap->args[0]; + if (copy_to_user(user_ptr, &vmcs_version, + sizeof(vmcs_version))) + r = -EFAULT; + } + return r; + default: return -EINVAL; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 97780a0277fe..a2f2b8845502 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -963,6 +963,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_PPC_NESTED_HV 160 #define KVM_CAP_HYPERV_SEND_IPI 161 #define KVM_CAP_COALESCED_PIO 162 +#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163 #ifdef KVM_CAP_IRQ_ROUTING From 945679e301ea0de1148b64c7c3273d92059e7231 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 16 Oct 2018 18:50:02 +0200 Subject: [PATCH 176/204] KVM: nVMX: add enlightened VMCS state Adds hv_evmcs pointer and implement copy_enlightened_to_vmcs12() and copy_enlightened_to_vmcs12(). prepare_vmcs02()/prepare_vmcs02_full() separation is not valid for Enlightened VMCS, do full sync for now. Suggested-by: Ladi Prosek Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 440 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 423 insertions(+), 17 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 459cdaa0d1cd..aebd008ccccb 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -831,10 +831,10 @@ struct nested_vmx { */ struct vmcs12 *cached_shadow_vmcs12; /* - * Indicates if the shadow vmcs must be updated with the - * data hold by vmcs12 + * Indicates if the shadow vmcs or enlightened vmcs must be updated + * with the data held by struct vmcs12. 
*/ - bool sync_shadow_vmcs; + bool need_vmcs12_sync; bool dirty_vmcs12; /* @@ -888,6 +888,8 @@ struct nested_vmx { /* in guest mode on SMM entry? */ bool guest_mode; } smm; + + struct hv_enlightened_vmcs *hv_evmcs; }; #define POSTED_INTR_ON 0 @@ -8450,7 +8452,7 @@ static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) /* copy to memory all shadowed fields in case they were modified */ copy_shadow_to_vmcs12(vmx); - vmx->nested.sync_shadow_vmcs = false; + vmx->nested.need_vmcs12_sync = false; vmx_disable_shadow_vmcs(vmx); } vmx->nested.posted_intr_nv = -1; @@ -8630,6 +8632,393 @@ static inline int vmcs12_write_any(struct vmcs12 *vmcs12, } +static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx) +{ + struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; + struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; + + /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ + vmcs12->tpr_threshold = evmcs->tpr_threshold; + vmcs12->guest_rip = evmcs->guest_rip; + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) { + vmcs12->guest_rsp = evmcs->guest_rsp; + vmcs12->guest_rflags = evmcs->guest_rflags; + vmcs12->guest_interruptibility_info = + evmcs->guest_interruptibility_info; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) { + vmcs12->cpu_based_vm_exec_control = + evmcs->cpu_based_vm_exec_control; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) { + vmcs12->exception_bitmap = evmcs->exception_bitmap; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) { + vmcs12->vm_entry_controls = evmcs->vm_entry_controls; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) { + vmcs12->vm_entry_intr_info_field = + evmcs->vm_entry_intr_info_field; + vmcs12->vm_entry_exception_error_code = + evmcs->vm_entry_exception_error_code; + vmcs12->vm_entry_instruction_len = + evmcs->vm_entry_instruction_len; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) { + vmcs12->host_ia32_pat = evmcs->host_ia32_pat; + vmcs12->host_ia32_efer = evmcs->host_ia32_efer; + vmcs12->host_cr0 = evmcs->host_cr0; + vmcs12->host_cr3 = evmcs->host_cr3; + vmcs12->host_cr4 = evmcs->host_cr4; + vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp; + vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip; + vmcs12->host_rip = evmcs->host_rip; + vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs; + vmcs12->host_es_selector = evmcs->host_es_selector; + vmcs12->host_cs_selector = evmcs->host_cs_selector; + vmcs12->host_ss_selector = evmcs->host_ss_selector; + vmcs12->host_ds_selector = evmcs->host_ds_selector; + vmcs12->host_fs_selector = evmcs->host_fs_selector; + vmcs12->host_gs_selector = evmcs->host_gs_selector; + vmcs12->host_tr_selector = evmcs->host_tr_selector; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) { + vmcs12->pin_based_vm_exec_control = + evmcs->pin_based_vm_exec_control; + vmcs12->vm_exit_controls = evmcs->vm_exit_controls; + vmcs12->secondary_vm_exec_control = + evmcs->secondary_vm_exec_control; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) { + vmcs12->io_bitmap_a = evmcs->io_bitmap_a; + vmcs12->io_bitmap_b = evmcs->io_bitmap_b; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) { + vmcs12->msr_bitmap = 
evmcs->msr_bitmap; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) { + vmcs12->guest_es_base = evmcs->guest_es_base; + vmcs12->guest_cs_base = evmcs->guest_cs_base; + vmcs12->guest_ss_base = evmcs->guest_ss_base; + vmcs12->guest_ds_base = evmcs->guest_ds_base; + vmcs12->guest_fs_base = evmcs->guest_fs_base; + vmcs12->guest_gs_base = evmcs->guest_gs_base; + vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base; + vmcs12->guest_tr_base = evmcs->guest_tr_base; + vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base; + vmcs12->guest_idtr_base = evmcs->guest_idtr_base; + vmcs12->guest_es_limit = evmcs->guest_es_limit; + vmcs12->guest_cs_limit = evmcs->guest_cs_limit; + vmcs12->guest_ss_limit = evmcs->guest_ss_limit; + vmcs12->guest_ds_limit = evmcs->guest_ds_limit; + vmcs12->guest_fs_limit = evmcs->guest_fs_limit; + vmcs12->guest_gs_limit = evmcs->guest_gs_limit; + vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit; + vmcs12->guest_tr_limit = evmcs->guest_tr_limit; + vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit; + vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit; + vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes; + vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes; + vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes; + vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes; + vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes; + vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes; + vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes; + vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes; + vmcs12->guest_es_selector = evmcs->guest_es_selector; + vmcs12->guest_cs_selector = evmcs->guest_cs_selector; + vmcs12->guest_ss_selector = evmcs->guest_ss_selector; + vmcs12->guest_ds_selector = evmcs->guest_ds_selector; + vmcs12->guest_fs_selector = evmcs->guest_fs_selector; + vmcs12->guest_gs_selector = evmcs->guest_gs_selector; + vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector; + vmcs12->guest_tr_selector = evmcs->guest_tr_selector; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) { + vmcs12->tsc_offset = evmcs->tsc_offset; + vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr; + vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) { + vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask; + vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask; + vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow; + vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow; + vmcs12->guest_cr0 = evmcs->guest_cr0; + vmcs12->guest_cr3 = evmcs->guest_cr3; + vmcs12->guest_cr4 = evmcs->guest_cr4; + vmcs12->guest_dr7 = evmcs->guest_dr7; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) { + vmcs12->host_fs_base = evmcs->host_fs_base; + vmcs12->host_gs_base = evmcs->host_gs_base; + vmcs12->host_tr_base = evmcs->host_tr_base; + vmcs12->host_gdtr_base = evmcs->host_gdtr_base; + vmcs12->host_idtr_base = evmcs->host_idtr_base; + vmcs12->host_rsp = evmcs->host_rsp; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) { + vmcs12->ept_pointer = evmcs->ept_pointer; + vmcs12->virtual_processor_id = evmcs->virtual_processor_id; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) { + vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer; + vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl; + 
vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat; + vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer; + vmcs12->guest_pdptr0 = evmcs->guest_pdptr0; + vmcs12->guest_pdptr1 = evmcs->guest_pdptr1; + vmcs12->guest_pdptr2 = evmcs->guest_pdptr2; + vmcs12->guest_pdptr3 = evmcs->guest_pdptr3; + vmcs12->guest_pending_dbg_exceptions = + evmcs->guest_pending_dbg_exceptions; + vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp; + vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip; + vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs; + vmcs12->guest_activity_state = evmcs->guest_activity_state; + vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs; + } + + /* + * Not used? + * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; + * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; + * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; + * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0; + * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1; + * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2; + * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3; + * vmcs12->page_fault_error_code_mask = + * evmcs->page_fault_error_code_mask; + * vmcs12->page_fault_error_code_match = + * evmcs->page_fault_error_code_match; + * vmcs12->cr3_target_count = evmcs->cr3_target_count; + * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; + * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; + * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; + */ + + /* + * Read only fields: + * vmcs12->guest_physical_address = evmcs->guest_physical_address; + * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; + * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; + * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; + * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; + * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; + * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; + * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; + * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; + * vmcs12->exit_qualification = evmcs->exit_qualification; + * vmcs12->guest_linear_address = evmcs->guest_linear_address; + * + * Not present in struct vmcs12: + * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; + * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; + * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; + * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; + */ + + return 0; +} + +static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) +{ + struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; + struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; + + /* + * Should not be changed by KVM: + * + * evmcs->host_es_selector = vmcs12->host_es_selector; + * evmcs->host_cs_selector = vmcs12->host_cs_selector; + * evmcs->host_ss_selector = vmcs12->host_ss_selector; + * evmcs->host_ds_selector = vmcs12->host_ds_selector; + * evmcs->host_fs_selector = vmcs12->host_fs_selector; + * evmcs->host_gs_selector = vmcs12->host_gs_selector; + * evmcs->host_tr_selector = vmcs12->host_tr_selector; + * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; + * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; + * evmcs->host_cr0 = vmcs12->host_cr0; + * evmcs->host_cr3 = vmcs12->host_cr3; + * evmcs->host_cr4 = vmcs12->host_cr4; + * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; + * 
evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; + * evmcs->host_rip = vmcs12->host_rip; + * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; + * evmcs->host_fs_base = vmcs12->host_fs_base; + * evmcs->host_gs_base = vmcs12->host_gs_base; + * evmcs->host_tr_base = vmcs12->host_tr_base; + * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; + * evmcs->host_idtr_base = vmcs12->host_idtr_base; + * evmcs->host_rsp = vmcs12->host_rsp; + * sync_vmcs12() doesn't read these: + * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; + * evmcs->io_bitmap_b = vmcs12->io_bitmap_b; + * evmcs->msr_bitmap = vmcs12->msr_bitmap; + * evmcs->ept_pointer = vmcs12->ept_pointer; + * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; + * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; + * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; + * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; + * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0; + * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1; + * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2; + * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3; + * evmcs->tpr_threshold = vmcs12->tpr_threshold; + * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; + * evmcs->exception_bitmap = vmcs12->exception_bitmap; + * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; + * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; + * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; + * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; + * evmcs->page_fault_error_code_mask = + * vmcs12->page_fault_error_code_mask; + * evmcs->page_fault_error_code_match = + * vmcs12->page_fault_error_code_match; + * evmcs->cr3_target_count = vmcs12->cr3_target_count; + * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; + * evmcs->tsc_offset = vmcs12->tsc_offset; + * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; + * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; + * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; + * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; + * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; + * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; + * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; + * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; + * + * Not present in struct vmcs12: + * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; + * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; + * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; + * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; + */ + + evmcs->guest_es_selector = vmcs12->guest_es_selector; + evmcs->guest_cs_selector = vmcs12->guest_cs_selector; + evmcs->guest_ss_selector = vmcs12->guest_ss_selector; + evmcs->guest_ds_selector = vmcs12->guest_ds_selector; + evmcs->guest_fs_selector = vmcs12->guest_fs_selector; + evmcs->guest_gs_selector = vmcs12->guest_gs_selector; + evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; + evmcs->guest_tr_selector = vmcs12->guest_tr_selector; + + evmcs->guest_es_limit = vmcs12->guest_es_limit; + evmcs->guest_cs_limit = vmcs12->guest_cs_limit; + evmcs->guest_ss_limit = vmcs12->guest_ss_limit; + evmcs->guest_ds_limit = vmcs12->guest_ds_limit; + evmcs->guest_fs_limit = vmcs12->guest_fs_limit; + evmcs->guest_gs_limit = vmcs12->guest_gs_limit; + evmcs->guest_ldtr_limit = 
vmcs12->guest_ldtr_limit; + evmcs->guest_tr_limit = vmcs12->guest_tr_limit; + evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; + evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; + + evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; + evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; + evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; + evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; + evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; + evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; + evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; + evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; + + evmcs->guest_es_base = vmcs12->guest_es_base; + evmcs->guest_cs_base = vmcs12->guest_cs_base; + evmcs->guest_ss_base = vmcs12->guest_ss_base; + evmcs->guest_ds_base = vmcs12->guest_ds_base; + evmcs->guest_fs_base = vmcs12->guest_fs_base; + evmcs->guest_gs_base = vmcs12->guest_gs_base; + evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; + evmcs->guest_tr_base = vmcs12->guest_tr_base; + evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; + evmcs->guest_idtr_base = vmcs12->guest_idtr_base; + + evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; + evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; + + evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; + evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; + evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; + evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; + + evmcs->guest_pending_dbg_exceptions = + vmcs12->guest_pending_dbg_exceptions; + evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; + evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; + + evmcs->guest_activity_state = vmcs12->guest_activity_state; + evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; + + evmcs->guest_cr0 = vmcs12->guest_cr0; + evmcs->guest_cr3 = vmcs12->guest_cr3; + evmcs->guest_cr4 = vmcs12->guest_cr4; + evmcs->guest_dr7 = vmcs12->guest_dr7; + + evmcs->guest_physical_address = vmcs12->guest_physical_address; + + evmcs->vm_instruction_error = vmcs12->vm_instruction_error; + evmcs->vm_exit_reason = vmcs12->vm_exit_reason; + evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; + evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; + evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; + evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; + evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; + evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; + + evmcs->exit_qualification = vmcs12->exit_qualification; + + evmcs->guest_linear_address = vmcs12->guest_linear_address; + evmcs->guest_rsp = vmcs12->guest_rsp; + evmcs->guest_rflags = vmcs12->guest_rflags; + + evmcs->guest_interruptibility_info = + vmcs12->guest_interruptibility_info; + evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; + evmcs->vm_entry_controls = vmcs12->vm_entry_controls; + evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; + evmcs->vm_entry_exception_error_code = + vmcs12->vm_entry_exception_error_code; + evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len; + + evmcs->guest_rip = vmcs12->guest_rip; + + evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs; + + return 0; +} + /* * Copy the writable VMCS shadow fields back to the VMCS12, in case * they have been modified by the L1 guest. 
Note that the "read-only" @@ -8854,7 +9243,7 @@ static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) SECONDARY_EXEC_SHADOW_VMCS); vmcs_write64(VMCS_LINK_POINTER, __pa(vmx->vmcs01.shadow_vmcs)); - vmx->nested.sync_shadow_vmcs = true; + vmx->nested.need_vmcs12_sync = true; } vmx->nested.dirty_vmcs12 = true; } @@ -10659,9 +11048,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmcs_write32(PLE_WINDOW, vmx->ple_window); } - if (vmx->nested.sync_shadow_vmcs) { - copy_vmcs12_to_shadow(vmx); - vmx->nested.sync_shadow_vmcs = false; + if (vmx->nested.need_vmcs12_sync) { + if (vmx->nested.hv_evmcs) { + copy_vmcs12_to_enlightened(vmx); + /* All fields are clean */ + vmx->nested.hv_evmcs->hv_clean_fields |= + HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; + } else { + copy_vmcs12_to_shadow(vmx); + } + vmx->nested.need_vmcs12_sync = false; } if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) @@ -12037,7 +12433,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) u32 exec_control, vmcs12_exec_ctrl; u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); - if (vmx->nested.dirty_vmcs12) + if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) prepare_vmcs02_early_full(vmx, vmcs12); /* @@ -12312,7 +12708,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, { struct vcpu_vmx *vmx = to_vmx(vcpu); - if (vmx->nested.dirty_vmcs12) { + if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) { prepare_vmcs02_full(vmx, vmcs12); vmx->nested.dirty_vmcs12 = false; } @@ -12899,8 +13295,8 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, load_vmcs12_host_state(vcpu, vmcs12); vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY; vmcs12->exit_qualification = exit_qual; - if (enable_shadow_vmcs) - vmx->nested.sync_shadow_vmcs = true; + if (enable_shadow_vmcs || vmx->nested.hv_evmcs) + vmx->nested.need_vmcs12_sync = true; return 1; } @@ -12932,8 +13328,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) if (vmcs12->hdr.shadow_vmcs) return nested_vmx_failInvalid(vcpu); - if (enable_shadow_vmcs) + if (vmx->nested.hv_evmcs) { + copy_enlightened_to_vmcs12(vmx); + /* Enlightened VMCS doesn't have launch state */ + vmcs12->launch_state = !launch; + } else if (enable_shadow_vmcs) { copy_shadow_to_vmcs12(vmx); + } /* * The nested entry process starts with enforcing various prerequisites @@ -13690,8 +14091,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, */ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); - if (enable_shadow_vmcs && exit_reason != -1) - vmx->nested.sync_shadow_vmcs = true; + if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs)) + vmx->nested.need_vmcs12_sync = true; /* in case we halted in L2 */ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; @@ -14216,6 +14617,11 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, vmx = to_vmx(vcpu); vmcs12 = get_vmcs12(vcpu); + + /* FIXME: Enlightened VMCS is currently unsupported */ + if (vmx->nested.hv_evmcs) + return -ENOTSUPP; + if (nested_vmx_allowed(vcpu) && (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr; @@ -14257,12 +14663,12 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, * When running L2, the authoritative vmcs12 state is in the * vmcs02. 
When running L1, the authoritative vmcs12 state is * in the shadow vmcs linked to vmcs01, unless - * sync_shadow_vmcs is set, in which case, the authoritative + * need_vmcs12_sync is set, in which case, the authoritative * vmcs12 state is in the vmcs12 already. */ if (is_guest_mode(vcpu)) sync_vmcs12(vcpu, vmcs12); - else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs) + else if (enable_shadow_vmcs && !vmx->nested.need_vmcs12_sync) copy_shadow_to_vmcs12(vmx); if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) From b8bbab928fb187530d6bb0932b86661f99e3a01d Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 16 Oct 2018 18:50:03 +0200 Subject: [PATCH 177/204] KVM: nVMX: implement enlightened VMPTRLD and VMCLEAR Per Hyper-V TLFS 5.0b: "The L1 hypervisor may choose to use enlightened VMCSs by writing 1 to the corresponding field in the VP assist page (see section 7.8.7). Another field in the VP assist page controls the currently active enlightened VMCS. Each enlightened VMCS is exactly one page (4 KB) in size and must be initially zeroed. No VMPTRLD instruction must be executed to make an enlightened VMCS active or current. After the L1 hypervisor performs a VM entry with an enlightened VMCS, the VMCS is considered active on the processor. An enlightened VMCS can only be active on a single processor at the same time. The L1 hypervisor can execute a VMCLEAR instruction to transition an enlightened VMCS from the active to the non-active state. Any VMREAD or VMWRITE instructions while an enlightened VMCS is active is unsupported and can result in unexpected behavior." Keep Enlightened VMCS structure for the current L2 guest permanently mapped from struct nested_vmx instead of mapping it every time. Suggested-by: Ladi Prosek Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 115 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 108 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index aebd008ccccb..cfb44acd4291 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -20,6 +20,7 @@ #include "mmu.h" #include "cpuid.h" #include "lapic.h" +#include "hyperv.h" #include #include @@ -889,6 +890,8 @@ struct nested_vmx { bool guest_mode; } smm; + gpa_t hv_evmcs_vmptr; + struct page *hv_evmcs_page; struct hv_enlightened_vmcs *hv_evmcs; }; @@ -8111,11 +8114,13 @@ static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu) static int nested_vmx_failValid(struct kvm_vcpu *vcpu, u32 vm_instruction_error) { + struct vcpu_vmx *vmx = to_vmx(vcpu); + /* * failValid writes the error number to the current VMCS, which * can't be done if there isn't a current VMCS. 
*/ - if (to_vmx(vcpu)->nested.current_vmptr == -1ull) + if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs) return nested_vmx_failInvalid(vcpu); vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) @@ -8441,6 +8446,20 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) vmcs_write64(VMCS_LINK_POINTER, -1ull); } +static inline void nested_release_evmcs(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (!vmx->nested.hv_evmcs) + return; + + kunmap(vmx->nested.hv_evmcs_page); + kvm_release_page_dirty(vmx->nested.hv_evmcs_page); + vmx->nested.hv_evmcs_vmptr = -1ull; + vmx->nested.hv_evmcs_page = NULL; + vmx->nested.hv_evmcs = NULL; +} + static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -8509,6 +8528,8 @@ static void free_nested(struct kvm_vcpu *vcpu) kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); + nested_release_evmcs(vcpu); + free_loaded_vmcs(&vmx->nested.vmcs02); } @@ -8542,12 +8563,18 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) return nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); - if (vmptr == vmx->nested.current_vmptr) - nested_release_vmcs12(vcpu); + if (vmx->nested.hv_evmcs_page) { + if (vmptr == vmx->nested.hv_evmcs_vmptr) + nested_release_evmcs(vcpu); + } else { + if (vmptr == vmx->nested.current_vmptr) + nested_release_vmcs12(vcpu); - kvm_vcpu_write_guest(vcpu, - vmptr + offsetof(struct vmcs12, launch_state), - &zero, sizeof(zero)); + kvm_vcpu_write_guest(vcpu, + vmptr + offsetof(struct vmcs12, + launch_state), + &zero, sizeof(zero)); + } return nested_vmx_succeed(vcpu); } @@ -8637,6 +8664,8 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx) struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; + vmcs12->hdr.revision_id = evmcs->revision_id; + /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ vmcs12->tpr_threshold = evmcs->tpr_threshold; vmcs12->guest_rip = evmcs->guest_rip; @@ -9268,6 +9297,10 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) return nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); + /* Forbid normal VMPTRLD if Enlightened version was used */ + if (vmx->nested.hv_evmcs) + return 1; + if (vmx->nested.current_vmptr != vmptr) { struct vmcs12 *new_vmcs12; struct page *page; @@ -9301,6 +9334,68 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) return nested_vmx_succeed(vcpu); } +/* + * This is an equivalent of the nested hypervisor executing the vmptrld + * instruction. 
+ */ +static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct hv_vp_assist_page assist_page; + + if (likely(!vmx->nested.enlightened_vmcs_enabled)) + return 1; + + if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page))) + return 1; + + if (unlikely(!assist_page.enlighten_vmentry)) + return 1; + + if (unlikely(assist_page.current_nested_vmcs != + vmx->nested.hv_evmcs_vmptr)) { + + if (!vmx->nested.hv_evmcs) + vmx->nested.current_vmptr = -1ull; + + nested_release_evmcs(vcpu); + + vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page( + vcpu, assist_page.current_nested_vmcs); + + if (unlikely(is_error_page(vmx->nested.hv_evmcs_page))) + return 0; + + vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page); + + if (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION) { + nested_release_evmcs(vcpu); + return 0; + } + + vmx->nested.dirty_vmcs12 = true; + /* + * As we keep L2 state for one guest only 'hv_clean_fields' mask + * can't be used when we switch between them. Reset it here for + * simplicity. + */ + vmx->nested.hv_evmcs->hv_clean_fields &= + ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; + vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs; + + /* + * Unlike normal vmcs12, enlightened vmcs12 is not fully + * reloaded from guest's memory (read only fields, fields not + * present in struct hv_enlightened_vmcs, ...). Make sure there + * are no leftovers. + */ + memset(vmx->nested.cached_vmcs12, 0, + sizeof(*vmx->nested.cached_vmcs12)); + + } + return 1; +} + /* Emulate the VMPTRST instruction */ static int handle_vmptrst(struct kvm_vcpu *vcpu) { @@ -9313,6 +9408,9 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) if (!nested_vmx_check_permission(vcpu)) return 1; + if (unlikely(to_vmx(vcpu)->nested.hv_evmcs)) + return 1; + if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva)) return 1; /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ @@ -13314,7 +13412,10 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) if (!nested_vmx_check_permission(vcpu)) return 1; - if (vmx->nested.current_vmptr == -1ull) + if (!nested_vmx_handle_enlightened_vmptrld(vcpu)) + return 1; + + if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull) return nested_vmx_failInvalid(vcpu); vmcs12 = get_vmcs12(vcpu); From c4ebd6295ab730bf922c96d3c4045c3a84fd1cf7 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 16 Oct 2018 18:50:04 +0200 Subject: [PATCH 178/204] KVM: nVMX: optimize prepare_vmcs02{,_full} for Enlightened VMCS case When Enlightened VMCS is in use by the L1 hypervisor we can avoid vmwriting VMCS fields which did not change. Our first goal is to achieve minimal impact on the traditional VMCS case, so we're not wrapping each vmwrite() with an if-changed checker. We also can't utilize static keys as Enlightened VMCS usage is per-guest. This patch implements the simplest solution: checking fields in groups. We skip single vmwrite() statements as doing the check will cost us something even in the non-evmcs case and the win is tiny. Unfortunately, this makes prepare_vmcs02{,_full}() code Enlightened VMCS-dependent (and a bit ugly). 
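For illustration only, the grouping idea above can be sketched as a small standalone C program; the structure, clean-field bit value and vmwrite stand-in below are made up for the sketch and do not match the kernel's or Hyper-V's definitions:

#include <stdint.h>
#include <stdio.h>

/* Stand-in clean-field bit; the real HV_VMX_ENLIGHTENED_CLEAN_FIELD_* values
 * come from the Hyper-V headers and are not reproduced here. */
#define CLEAN_FIELD_GUEST_GRP2 (1u << 1)

struct evmcs_sketch {
	uint32_t hv_clean_fields;
	uint32_t guest_es_limit;
	uint32_t guest_cs_limit;
};

/* Stand-in for vmcs_write32(): only reports which fields would be written. */
static void vmwrite32_sketch(const char *field, uint32_t val)
{
	printf("vmwrite %s = %u\n", field, (unsigned int)val);
}

static void sync_guest_grp2(const struct evmcs_sketch *evmcs, int have_evmcs)
{
	/* Traditional VMCS: always write. Enlightened VMCS: write only when
	 * the group's clean bit is not set, i.e. L1 touched the group. */
	if (!have_evmcs ||
	    !(evmcs->hv_clean_fields & CLEAN_FIELD_GUEST_GRP2)) {
		vmwrite32_sketch("GUEST_ES_LIMIT", evmcs->guest_es_limit);
		vmwrite32_sketch("GUEST_CS_LIMIT", evmcs->guest_cs_limit);
	}
}

int main(void)
{
	struct evmcs_sketch e = { CLEAN_FIELD_GUEST_GRP2, 0xffff, 0xffff };

	sync_guest_grp2(&e, 0);	/* no eVMCS: both writes happen */
	sync_guest_grp2(&e, 1);	/* eVMCS, group marked clean: writes skipped */
	return 0;
}

The single-statement writes mentioned above stay unconditional for the same reason given in the message: a per-field check costs about as much as the write it might save.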
Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 116 +++++++++++++++++++++++++-------------------- 1 file changed, 64 insertions(+), 52 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index cfb44acd4291..0b665c74fadc 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12699,43 +12699,62 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) { - vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); - vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); - vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); - vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); - vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); - vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); - vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); - vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); - vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); - vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); - vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); - vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); - vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); - vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); - vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); - vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); - vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); - vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); - vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); - vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); - vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); - vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); - vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); - vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); - vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); - vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); - vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); - vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); - vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); - vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); - vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); + struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; - vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); - vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, - vmcs12->guest_pending_dbg_exceptions); - vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); - vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); + if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { + vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); + vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); + vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); + vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); + vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); + vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); + vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); + vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); + vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); + vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); + vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); + vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); + vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); + 
vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); + vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); + vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); + vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); + vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); + vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); + vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); + vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); + vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); + vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); + vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); + vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); + vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); + vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); + vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); + vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); + vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); + vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); + } + + if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { + vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); + vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, + vmcs12->guest_pending_dbg_exceptions); + vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); + vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); + + /* + * L1 may access the L2's PDPTR, so save them to construct + * vmcs12 + */ + if (enable_ept) { + vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); + vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); + vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); + vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); + } + } if (nested_cpu_has_xsaves(vmcs12)) vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); @@ -12778,16 +12797,6 @@ static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) else vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); } - - /* - * L1 may access the L2's PDPTR, so save them to construct vmcs12 - */ - if (enable_ept) { - vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); - vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); - vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); - vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); - } } /* @@ -12805,6 +12814,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 *entry_failure_code) { struct vcpu_vmx *vmx = to_vmx(vcpu); + struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) { prepare_vmcs02_full(vmx, vmcs12); @@ -12815,12 +12825,14 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * First, the fields that are shadowed. This must be kept in sync * with vmx_shadow_fields.h. 
*/ - - vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); - vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); - vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); - vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); - vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); + if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { + vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); + vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); + vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); + vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); + vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); + } if (vmx->nested.nested_run_pending && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { From 12e0c6186ba44bb6194cf5d2eda8f46880126587 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 16 Oct 2018 18:50:05 +0200 Subject: [PATCH 179/204] x86/kvm/hyperv: don't clear VP assist pages on init VP assist pages may hold valuable data which needs to be preserved across migration. Clear only the PV EOI portion of the data on init; the guest is responsible for making sure there's no garbage in the rest. This will be used for nVMX migration, where the eVMCS address needs to be preserved. Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index d9b7b4cd4e43..4e80080f277a 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1104,7 +1104,13 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) addr = kvm_vcpu_gfn_to_hva(vcpu, gfn); if (kvm_is_error_hva(addr)) return 1; - if (__clear_user((void __user *)addr, PAGE_SIZE)) + + /* + * Clear apic_assist portion of struct hv_vp_assist_page + * only, there can be valuable data in the rest which needs + * to be preserved e.g. on migration. + */ + if (__clear_user((void __user *)addr, sizeof(u32))) return 1; hv_vcpu->hv_vapic = data; kvm_vcpu_mark_page_dirty(vcpu, gfn); From a7c42bb6da6b1b54b2e7bd567636d72d87b10a79 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 16 Oct 2018 18:50:06 +0200 Subject: [PATCH 180/204] x86/kvm/lapic: preserve gfn_to_hva_cache len on cache reinit vcpu->arch.pv_eoi is accessible through both HV_X64_MSR_VP_ASSIST_PAGE and MSR_KVM_PV_EOI_EN, so on migration userspace may try to restore them in any order. The values match; however, kvm_lapic_enable_pv_eoi() uses a different length: for the Hyper-V case it's the whole struct hv_vp_assist_page, for the KVM-native case it is 8. If the KVM-native MSR is restored last, the cache will be reinitialized with len=8, so trying to access the VP assist page beyond 8 bytes with kvm_read_guest_cached() will fail. Check whether we are re-initializing the cache for the same address and preserve the length in case it was greater. 
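The length-preservation rule can be illustrated with a tiny standalone sketch; the structure below is only a stand-in for the kernel's gfn_to_hva_cache, and the address/length values are arbitrary:

#include <stdint.h>
#include <stdio.h>

struct cache_sketch {			/* stand-in for struct gfn_to_hva_cache */
	uint64_t gpa;
	unsigned long len;
};

static void cache_reinit(struct cache_sketch *ghc, uint64_t addr,
			 unsigned long len)
{
	unsigned long new_len;

	/* Re-initializing for the same address must never shrink the cache,
	 * otherwise later reads of the full VP assist page would fail. */
	if (addr == ghc->gpa && len <= ghc->len)
		new_len = ghc->len;
	else
		new_len = len;

	ghc->gpa = addr;
	ghc->len = new_len;
}

int main(void)
{
	/* Cache set up for the whole (4 KiB) VP assist page first ... */
	struct cache_sketch c = { 0x1000, 4096 };

	/* ... then the KVM-native PV EOI MSR is restored with len = 8. */
	cache_reinit(&c, 0x1000, 8);
	printf("cached len: %lu\n", c.len);	/* prints 4096, not 8 */
	return 0;
}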
Signed-off-by: Vitaly Kuznetsov Cc: stable@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 79358fd6a71c..3cd227ff807f 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2647,14 +2647,22 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len) { u64 addr = data & ~KVM_MSR_ENABLED; + struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data; + unsigned long new_len; + if (!IS_ALIGNED(addr, 4)) return 1; vcpu->arch.pv_eoi.msr_val = data; if (!pv_eoi_enabled(vcpu)) return 0; - return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, - addr, len); + + if (addr == ghc->gpa && len <= ghc->len) + new_len = ghc->len; + else + new_len = len; + + return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len); } void kvm_apic_accept_events(struct kvm_vcpu *vcpu) From a1b0c1c64dfef0cff8555bb708bfc5d7c66c6ca4 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 16 Oct 2018 18:50:07 +0200 Subject: [PATCH 181/204] x86/kvm/nVMX: allow bare VMXON state migration It is perfectly valid for a guest to do VMXON and not do VMPTRLD. This state needs to be preserved on migration. Cc: stable@vger.kernel.org Fixes: 8fcc4b5923af5de58b80b53a069453b135693304 Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 0b665c74fadc..5df47291ad4d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -14827,13 +14827,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa)) return -EINVAL; - if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12)) - return -EINVAL; - - if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || - !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) - return -EINVAL; - if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) return -EINVAL; @@ -14863,6 +14856,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (ret) return ret; + /* Empty 'VMXON' state is permitted */ + if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12)) + return 0; + + if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || + !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) + return -EINVAL; + set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa); if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { From 1e7ecd1b3d21a6302f3ee4a3720f682eb2467a3c Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 16 Oct 2018 18:50:08 +0200 Subject: [PATCH 182/204] KVM: selftests: state_test: test bare VMXON migration Split prepare_for_vmx_operation() into prepare_for_vmx_operation() and load_vmcs() so we can inject GUEST_SYNC() in between. 
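The resulting guest-side ordering, as the updated state_test below exercises it, is roughly the following fragment of l1_guest_code() (shown here only as a usage sketch; GUEST_ASSERT(), GUEST_SYNC() and the two helpers come from the selftest library):

	/* VMXON first, then a sync point where the host can save/restore the
	 * bare-VMXON state, then VMCLEAR + VMPTRLD via the new helper. */
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));	/* VMXON only */
	GUEST_SYNC(3);
	GUEST_ASSERT(load_vmcs(vmx_pages));			/* VMCLEAR + VMPTRLD */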
Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- .../selftests/kvm/include/x86_64/vmx.h | 1 + tools/testing/selftests/kvm/lib/x86_64/vmx.c | 5 +++++ .../testing/selftests/kvm/x86_64/state_test.c | 22 ++++++++++--------- .../kvm/x86_64/vmx_tsc_adjust_test.c | 1 + 4 files changed, 19 insertions(+), 10 deletions(-) diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h index 12ebd836f7ef..4bbee8560292 100644 --- a/tools/testing/selftests/kvm/include/x86_64/vmx.h +++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h @@ -548,5 +548,6 @@ struct vmx_pages { struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva); bool prepare_for_vmx_operation(struct vmx_pages *vmx); void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp); +bool load_vmcs(struct vmx_pages *vmx); #endif /* SELFTEST_KVM_VMX_H */ diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c index d7c401472247..cc356da9b3d8 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c +++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c @@ -107,6 +107,11 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx) if (vmxon(vmx->vmxon_gpa)) return false; + return true; +} + +bool load_vmcs(struct vmx_pages *vmx) +{ /* Load a VMCS. */ *(uint32_t *)(vmx->vmcs) = vmcs_revision(); if (vmclear(vmx->vmcs_gpa)) diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c index 43df194a7c1e..03da41f0f736 100644 --- a/tools/testing/selftests/kvm/x86_64/state_test.c +++ b/tools/testing/selftests/kvm/x86_64/state_test.c @@ -26,20 +26,20 @@ static bool have_nested_state; void l2_guest_code(void) { - GUEST_SYNC(5); + GUEST_SYNC(6); /* Exit to L1 */ vmcall(); /* L1 has now set up a shadow VMCS for us. */ GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee); - GUEST_SYNC(9); + GUEST_SYNC(10); GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee); GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee)); - GUEST_SYNC(10); + GUEST_SYNC(11); GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee); GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee)); - GUEST_SYNC(11); + GUEST_SYNC(12); /* Done, exit to L1 and never come back. 
*/ vmcall(); @@ -52,15 +52,17 @@ void l1_guest_code(struct vmx_pages *vmx_pages) GUEST_ASSERT(vmx_pages->vmcs_gpa); GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); + GUEST_SYNC(3); + GUEST_ASSERT(load_vmcs(vmx_pages)); GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa); - GUEST_SYNC(3); + GUEST_SYNC(4); GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa); prepare_vmcs(vmx_pages, l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); - GUEST_SYNC(4); + GUEST_SYNC(5); GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa); GUEST_ASSERT(!vmlaunch()); GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa); @@ -72,7 +74,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages) GUEST_ASSERT(!vmresume()); GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); - GUEST_SYNC(6); + GUEST_SYNC(7); GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); GUEST_ASSERT(!vmresume()); @@ -85,12 +87,12 @@ void l1_guest_code(struct vmx_pages *vmx_pages) GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa)); GUEST_ASSERT(vmlaunch()); - GUEST_SYNC(7); + GUEST_SYNC(8); GUEST_ASSERT(vmlaunch()); GUEST_ASSERT(vmresume()); vmwrite(GUEST_RIP, 0xc0ffee); - GUEST_SYNC(8); + GUEST_SYNC(9); GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee); GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa)); @@ -101,7 +103,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages) GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee); GUEST_ASSERT(vmlaunch()); GUEST_ASSERT(vmresume()); - GUEST_SYNC(12); + GUEST_SYNC(13); GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee); GUEST_ASSERT(vmlaunch()); GUEST_ASSERT(vmresume()); diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c index 38a91a5f04ac..18fa64db0d7a 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c @@ -94,6 +94,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages) check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE); GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); + GUEST_ASSERT(load_vmcs(vmx_pages)); /* Prepare the VMCS for L2 execution. */ prepare_vmcs(vmx_pages, l2_guest_code, From 8cab6507f64eff0ccfea01fccbc7e3e05e2aaf7e Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 16 Oct 2018 18:50:09 +0200 Subject: [PATCH 183/204] x86/kvm/nVMX: nested state migration for Enlightened VMCS Add support for get/set of nested state when Enlightened VMCS is in use. A new KVM_STATE_NESTED_EVMCS flag to indicate eVMCS on the vCPU was enabled is added. Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/include/uapi/asm/kvm.h | 1 + arch/x86/kvm/vmx.c | 78 +++++++++++++++++++++++++-------- arch/x86/kvm/x86.c | 6 ++- 3 files changed, 65 insertions(+), 20 deletions(-) diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index fd23d5778ea1..ab76aa1d3a4d 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -381,6 +381,7 @@ struct kvm_sync_regs { #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_RUN_PENDING 0x00000002 +#define KVM_STATE_NESTED_EVMCS 0x00000004 #define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_SMM_VMXON 0x00000002 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 5df47291ad4d..8d7c60faaacd 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1618,7 +1618,8 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu, * maximum supported version. KVM supports versions from 1 to * KVM_EVMCS_VERSION. 
*/ - *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1; + if (vmcs_version) + *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1; vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; @@ -9338,7 +9339,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) * This is an equivalent of the nested hypervisor executing the vmptrld * instruction. */ -static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu) +static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu, + bool from_launch) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct hv_vp_assist_page assist_page; @@ -9389,8 +9391,9 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu) * present in struct hv_enlightened_vmcs, ...). Make sure there * are no leftovers. */ - memset(vmx->nested.cached_vmcs12, 0, - sizeof(*vmx->nested.cached_vmcs12)); + if (from_launch) + memset(vmx->nested.cached_vmcs12, 0, + sizeof(*vmx->nested.cached_vmcs12)); } return 1; @@ -11147,6 +11150,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) } if (vmx->nested.need_vmcs12_sync) { + /* + * hv_evmcs may end up being not mapped after migration (when + * L2 was running), map it here to make sure vmcs12 changes are + * properly reflected. + */ + if (vmx->nested.enlightened_vmcs_enabled && + !vmx->nested.hv_evmcs) + nested_vmx_handle_enlightened_vmptrld(vcpu, false); + if (vmx->nested.hv_evmcs) { copy_vmcs12_to_enlightened(vmx); /* All fields are clean */ @@ -13424,7 +13436,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) if (!nested_vmx_check_permission(vcpu)) return 1; - if (!nested_vmx_handle_enlightened_vmptrld(vcpu)) + if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true)) return 1; if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull) @@ -14711,6 +14723,20 @@ static int enable_smi_window(struct kvm_vcpu *vcpu) return 0; } +static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* + * In case we do two consecutive get/set_nested_state()s while L2 was + * running hv_evmcs may end up not being mapped (we map it from + * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always + * have vmcs12 if it is true. + */ + return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull || + vmx->nested.hv_evmcs; +} + static int vmx_get_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, u32 user_data_size) @@ -14731,16 +14757,15 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, vmx = to_vmx(vcpu); vmcs12 = get_vmcs12(vcpu); - /* FIXME: Enlightened VMCS is currently unsupported */ - if (vmx->nested.hv_evmcs) - return -ENOTSUPP; + if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled) + kvm_state.flags |= KVM_STATE_NESTED_EVMCS; if (nested_vmx_allowed(vcpu) && (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr; kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr; - if (vmx->nested.current_vmptr != -1ull) { + if (vmx_has_valid_vmcs12(vcpu)) { kvm_state.size += VMCS12_SIZE; if (is_guest_mode(vcpu) && @@ -14769,20 +14794,24 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) return -EFAULT; - if (vmx->nested.current_vmptr == -1ull) + if (!vmx_has_valid_vmcs12(vcpu)) goto out; /* * When running L2, the authoritative vmcs12 state is in the * vmcs02. 
When running L1, the authoritative vmcs12 state is - * in the shadow vmcs linked to vmcs01, unless + * in the shadow or enlightened vmcs linked to vmcs01, unless * need_vmcs12_sync is set, in which case, the authoritative * vmcs12 state is in the vmcs12 already. */ - if (is_guest_mode(vcpu)) + if (is_guest_mode(vcpu)) { sync_vmcs12(vcpu, vmcs12); - else if (enable_shadow_vmcs && !vmx->nested.need_vmcs12_sync) - copy_shadow_to_vmcs12(vmx); + } else if (!vmx->nested.need_vmcs12_sync) { + if (vmx->nested.hv_evmcs) + copy_enlightened_to_vmcs12(vmx); + else if (enable_shadow_vmcs) + copy_shadow_to_vmcs12(vmx); + } if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) return -EFAULT; @@ -14810,6 +14839,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (kvm_state->format != 0) return -EINVAL; + if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) + nested_enable_evmcs(vcpu, NULL); + if (!nested_vmx_allowed(vcpu)) return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL; @@ -14860,11 +14892,21 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12)) return 0; - if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || - !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) - return -EINVAL; + if (kvm_state->vmx.vmcs_pa != -1ull) { + if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || + !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) + return -EINVAL; - set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa); + set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa); + } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { + /* + * Sync eVMCS upon entry as we may not have + * HV_X64_MSR_VP_ASSIST_PAGE set up yet. + */ + vmx->nested.need_vmcs12_sync = true; + } else { + return -EINVAL; + } if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { vmx->nested.smm.vmxon = true; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 50f308499ce5..0fe1e3762e82 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4068,11 +4068,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp, break; if (kvm_state.flags & - ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE)) + ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE + | KVM_STATE_NESTED_EVMCS)) break; /* nested_run_pending implies guest_mode. */ - if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING) + if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING) + && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) break; r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); From c939989d74e27f44229a51d8fe96b5c297965bfa Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 16 Oct 2018 18:50:10 +0200 Subject: [PATCH 184/204] tools/headers: update kvm.h Pick up the latest kvm.h definitions. 
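As a usage note, the capability constant being synced here is what userspace probes before enabling eVMCS; a minimal standalone check could look like the sketch below (it assumes a linux/kvm.h recent enough to define the constant and a host that exposes /dev/kvm):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* KVM_CHECK_EXTENSION returns a positive value when supported. */
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0)
		printf("Enlightened VMCS capability available\n");
	else
		printf("Enlightened VMCS capability not available\n");

	return 0;
}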
Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- tools/include/uapi/linux/kvm.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 07548de5c988..2875ce85b322 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -719,6 +719,7 @@ struct kvm_ppc_one_seg_page_size { #define KVM_PPC_PAGE_SIZES_REAL 0x00000001 #define KVM_PPC_1T_SEGMENTS 0x00000002 +#define KVM_PPC_NO_HASH 0x00000004 struct kvm_ppc_smmu_info { __u64 flags; @@ -952,6 +953,11 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_S390_HPAGE_1M 156 #define KVM_CAP_NESTED_STATE 157 #define KVM_CAP_ARM_INJECT_SERROR_ESR 158 +#define KVM_CAP_MSR_PLATFORM_INFO 159 +#define KVM_CAP_PPC_NESTED_HV 160 +#define KVM_CAP_HYPERV_SEND_IPI 161 +#define KVM_CAP_COALESCED_PIO 162 +#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163 #ifdef KVM_CAP_IRQ_ROUTING From 18178ff862170ee4c9fd7450173c6fba9e3c4c3a Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Tue, 16 Oct 2018 18:50:11 +0200 Subject: [PATCH 185/204] KVM: selftests: add Enlightened VMCS test Modify test library and add eVMCS test. This includes nVMX save/restore testing. Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 1 + tools/testing/selftests/kvm/include/evmcs.h | 1098 +++++++++++++++++ .../selftests/kvm/include/x86_64/vmx.h | 28 + tools/testing/selftests/kvm/lib/x86_64/vmx.c | 44 +- .../testing/selftests/kvm/x86_64/evmcs_test.c | 160 +++ 5 files changed, 1321 insertions(+), 10 deletions(-) create mode 100644 tools/testing/selftests/kvm/include/evmcs.h create mode 100644 tools/testing/selftests/kvm/x86_64/evmcs_test.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 5e23f97ec46f..01a219229238 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -13,6 +13,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test TEST_GEN_PROGS_x86_64 += x86_64/state_test +TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test TEST_GEN_PROGS_x86_64 += dirty_log_test TEST_GEN_PROGS_aarch64 += dirty_log_test diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h new file mode 100644 index 000000000000..4059014d93ea --- /dev/null +++ b/tools/testing/selftests/kvm/include/evmcs.h @@ -0,0 +1,1098 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * tools/testing/selftests/kvm/include/vmx.h + * + * Copyright (C) 2018, Red Hat, Inc. 
+ * + */ + +#ifndef SELFTEST_KVM_EVMCS_H +#define SELFTEST_KVM_EVMCS_H + +#include +#include "vmx.h" + +#define u16 uint16_t +#define u32 uint32_t +#define u64 uint64_t + +extern bool enable_evmcs; + +struct hv_vp_assist_page { + __u32 apic_assist; + __u32 reserved; + __u64 vtl_control[2]; + __u64 nested_enlightenments_control[2]; + __u32 enlighten_vmentry; + __u64 current_nested_vmcs; +}; + +struct hv_enlightened_vmcs { + u32 revision_id; + u32 abort; + + u16 host_es_selector; + u16 host_cs_selector; + u16 host_ss_selector; + u16 host_ds_selector; + u16 host_fs_selector; + u16 host_gs_selector; + u16 host_tr_selector; + + u64 host_ia32_pat; + u64 host_ia32_efer; + + u64 host_cr0; + u64 host_cr3; + u64 host_cr4; + + u64 host_ia32_sysenter_esp; + u64 host_ia32_sysenter_eip; + u64 host_rip; + u32 host_ia32_sysenter_cs; + + u32 pin_based_vm_exec_control; + u32 vm_exit_controls; + u32 secondary_vm_exec_control; + + u64 io_bitmap_a; + u64 io_bitmap_b; + u64 msr_bitmap; + + u16 guest_es_selector; + u16 guest_cs_selector; + u16 guest_ss_selector; + u16 guest_ds_selector; + u16 guest_fs_selector; + u16 guest_gs_selector; + u16 guest_ldtr_selector; + u16 guest_tr_selector; + + u32 guest_es_limit; + u32 guest_cs_limit; + u32 guest_ss_limit; + u32 guest_ds_limit; + u32 guest_fs_limit; + u32 guest_gs_limit; + u32 guest_ldtr_limit; + u32 guest_tr_limit; + u32 guest_gdtr_limit; + u32 guest_idtr_limit; + + u32 guest_es_ar_bytes; + u32 guest_cs_ar_bytes; + u32 guest_ss_ar_bytes; + u32 guest_ds_ar_bytes; + u32 guest_fs_ar_bytes; + u32 guest_gs_ar_bytes; + u32 guest_ldtr_ar_bytes; + u32 guest_tr_ar_bytes; + + u64 guest_es_base; + u64 guest_cs_base; + u64 guest_ss_base; + u64 guest_ds_base; + u64 guest_fs_base; + u64 guest_gs_base; + u64 guest_ldtr_base; + u64 guest_tr_base; + u64 guest_gdtr_base; + u64 guest_idtr_base; + + u64 padding64_1[3]; + + u64 vm_exit_msr_store_addr; + u64 vm_exit_msr_load_addr; + u64 vm_entry_msr_load_addr; + + u64 cr3_target_value0; + u64 cr3_target_value1; + u64 cr3_target_value2; + u64 cr3_target_value3; + + u32 page_fault_error_code_mask; + u32 page_fault_error_code_match; + + u32 cr3_target_count; + u32 vm_exit_msr_store_count; + u32 vm_exit_msr_load_count; + u32 vm_entry_msr_load_count; + + u64 tsc_offset; + u64 virtual_apic_page_addr; + u64 vmcs_link_pointer; + + u64 guest_ia32_debugctl; + u64 guest_ia32_pat; + u64 guest_ia32_efer; + + u64 guest_pdptr0; + u64 guest_pdptr1; + u64 guest_pdptr2; + u64 guest_pdptr3; + + u64 guest_pending_dbg_exceptions; + u64 guest_sysenter_esp; + u64 guest_sysenter_eip; + + u32 guest_activity_state; + u32 guest_sysenter_cs; + + u64 cr0_guest_host_mask; + u64 cr4_guest_host_mask; + u64 cr0_read_shadow; + u64 cr4_read_shadow; + u64 guest_cr0; + u64 guest_cr3; + u64 guest_cr4; + u64 guest_dr7; + + u64 host_fs_base; + u64 host_gs_base; + u64 host_tr_base; + u64 host_gdtr_base; + u64 host_idtr_base; + u64 host_rsp; + + u64 ept_pointer; + + u16 virtual_processor_id; + u16 padding16[3]; + + u64 padding64_2[5]; + u64 guest_physical_address; + + u32 vm_instruction_error; + u32 vm_exit_reason; + u32 vm_exit_intr_info; + u32 vm_exit_intr_error_code; + u32 idt_vectoring_info_field; + u32 idt_vectoring_error_code; + u32 vm_exit_instruction_len; + u32 vmx_instruction_info; + + u64 exit_qualification; + u64 exit_io_instruction_ecx; + u64 exit_io_instruction_esi; + u64 exit_io_instruction_edi; + u64 exit_io_instruction_eip; + + u64 guest_linear_address; + u64 guest_rsp; + u64 guest_rflags; + + u32 guest_interruptibility_info; + u32 cpu_based_vm_exec_control; + 
u32 exception_bitmap; + u32 vm_entry_controls; + u32 vm_entry_intr_info_field; + u32 vm_entry_exception_error_code; + u32 vm_entry_instruction_len; + u32 tpr_threshold; + + u64 guest_rip; + + u32 hv_clean_fields; + u32 hv_padding_32; + u32 hv_synthetic_controls; + struct { + u32 nested_flush_hypercall:1; + u32 msr_bitmap:1; + u32 reserved:30; + } hv_enlightenments_control; + u32 hv_vp_id; + + u64 hv_vm_id; + u64 partition_assist_page; + u64 padding64_4[4]; + u64 guest_bndcfgs; + u64 padding64_5[7]; + u64 xss_exit_bitmap; + u64 padding64_6[7]; +}; + +#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073 +#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001 +#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12 +#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \ + (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1)) + +struct hv_enlightened_vmcs *current_evmcs; +struct hv_vp_assist_page *current_vp_assist; + +static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) +{ + u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | + HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; + + wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val); + + current_vp_assist = vp_assist; + + enable_evmcs = true; + + return 0; +} + +static inline int evmcs_vmptrld(uint64_t vmcs_pa, void *vmcs) +{ + current_vp_assist->current_nested_vmcs = vmcs_pa; + current_vp_assist->enlighten_vmentry = 1; + + current_evmcs = vmcs; + + return 0; +} + +static inline int evmcs_vmptrst(uint64_t *value) +{ + *value = current_vp_assist->current_nested_vmcs & + ~HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; + + return 0; +} + +static inline int evmcs_vmread(uint64_t encoding, uint64_t *value) +{ + switch (encoding) { + case GUEST_RIP: + *value = current_evmcs->guest_rip; + break; + case GUEST_RSP: + *value = current_evmcs->guest_rsp; + break; + case GUEST_RFLAGS: + *value = current_evmcs->guest_rflags; + break; + case HOST_IA32_PAT: + *value = current_evmcs->host_ia32_pat; + break; + case HOST_IA32_EFER: + *value = current_evmcs->host_ia32_efer; + break; + case HOST_CR0: + *value = current_evmcs->host_cr0; + break; + case HOST_CR3: + *value = current_evmcs->host_cr3; + break; + case HOST_CR4: + *value = current_evmcs->host_cr4; + break; + case HOST_IA32_SYSENTER_ESP: + *value = current_evmcs->host_ia32_sysenter_esp; + break; + case HOST_IA32_SYSENTER_EIP: + *value = current_evmcs->host_ia32_sysenter_eip; + break; + case HOST_RIP: + *value = current_evmcs->host_rip; + break; + case IO_BITMAP_A: + *value = current_evmcs->io_bitmap_a; + break; + case IO_BITMAP_B: + *value = current_evmcs->io_bitmap_b; + break; + case MSR_BITMAP: + *value = current_evmcs->msr_bitmap; + break; + case GUEST_ES_BASE: + *value = current_evmcs->guest_es_base; + break; + case GUEST_CS_BASE: + *value = current_evmcs->guest_cs_base; + break; + case GUEST_SS_BASE: + *value = current_evmcs->guest_ss_base; + break; + case GUEST_DS_BASE: + *value = current_evmcs->guest_ds_base; + break; + case GUEST_FS_BASE: + *value = current_evmcs->guest_fs_base; + break; + case GUEST_GS_BASE: + *value = current_evmcs->guest_gs_base; + break; + case GUEST_LDTR_BASE: + *value = current_evmcs->guest_ldtr_base; + break; + case GUEST_TR_BASE: + *value = current_evmcs->guest_tr_base; + break; + case GUEST_GDTR_BASE: + *value = current_evmcs->guest_gdtr_base; + break; + case GUEST_IDTR_BASE: + *value = current_evmcs->guest_idtr_base; + break; + case TSC_OFFSET: + *value = current_evmcs->tsc_offset; + break; + case VIRTUAL_APIC_PAGE_ADDR: + *value = current_evmcs->virtual_apic_page_addr; + break; + case 
VMCS_LINK_POINTER: + *value = current_evmcs->vmcs_link_pointer; + break; + case GUEST_IA32_DEBUGCTL: + *value = current_evmcs->guest_ia32_debugctl; + break; + case GUEST_IA32_PAT: + *value = current_evmcs->guest_ia32_pat; + break; + case GUEST_IA32_EFER: + *value = current_evmcs->guest_ia32_efer; + break; + case GUEST_PDPTR0: + *value = current_evmcs->guest_pdptr0; + break; + case GUEST_PDPTR1: + *value = current_evmcs->guest_pdptr1; + break; + case GUEST_PDPTR2: + *value = current_evmcs->guest_pdptr2; + break; + case GUEST_PDPTR3: + *value = current_evmcs->guest_pdptr3; + break; + case GUEST_PENDING_DBG_EXCEPTIONS: + *value = current_evmcs->guest_pending_dbg_exceptions; + break; + case GUEST_SYSENTER_ESP: + *value = current_evmcs->guest_sysenter_esp; + break; + case GUEST_SYSENTER_EIP: + *value = current_evmcs->guest_sysenter_eip; + break; + case CR0_GUEST_HOST_MASK: + *value = current_evmcs->cr0_guest_host_mask; + break; + case CR4_GUEST_HOST_MASK: + *value = current_evmcs->cr4_guest_host_mask; + break; + case CR0_READ_SHADOW: + *value = current_evmcs->cr0_read_shadow; + break; + case CR4_READ_SHADOW: + *value = current_evmcs->cr4_read_shadow; + break; + case GUEST_CR0: + *value = current_evmcs->guest_cr0; + break; + case GUEST_CR3: + *value = current_evmcs->guest_cr3; + break; + case GUEST_CR4: + *value = current_evmcs->guest_cr4; + break; + case GUEST_DR7: + *value = current_evmcs->guest_dr7; + break; + case HOST_FS_BASE: + *value = current_evmcs->host_fs_base; + break; + case HOST_GS_BASE: + *value = current_evmcs->host_gs_base; + break; + case HOST_TR_BASE: + *value = current_evmcs->host_tr_base; + break; + case HOST_GDTR_BASE: + *value = current_evmcs->host_gdtr_base; + break; + case HOST_IDTR_BASE: + *value = current_evmcs->host_idtr_base; + break; + case HOST_RSP: + *value = current_evmcs->host_rsp; + break; + case EPT_POINTER: + *value = current_evmcs->ept_pointer; + break; + case GUEST_BNDCFGS: + *value = current_evmcs->guest_bndcfgs; + break; + case XSS_EXIT_BITMAP: + *value = current_evmcs->xss_exit_bitmap; + break; + case GUEST_PHYSICAL_ADDRESS: + *value = current_evmcs->guest_physical_address; + break; + case EXIT_QUALIFICATION: + *value = current_evmcs->exit_qualification; + break; + case GUEST_LINEAR_ADDRESS: + *value = current_evmcs->guest_linear_address; + break; + case VM_EXIT_MSR_STORE_ADDR: + *value = current_evmcs->vm_exit_msr_store_addr; + break; + case VM_EXIT_MSR_LOAD_ADDR: + *value = current_evmcs->vm_exit_msr_load_addr; + break; + case VM_ENTRY_MSR_LOAD_ADDR: + *value = current_evmcs->vm_entry_msr_load_addr; + break; + case CR3_TARGET_VALUE0: + *value = current_evmcs->cr3_target_value0; + break; + case CR3_TARGET_VALUE1: + *value = current_evmcs->cr3_target_value1; + break; + case CR3_TARGET_VALUE2: + *value = current_evmcs->cr3_target_value2; + break; + case CR3_TARGET_VALUE3: + *value = current_evmcs->cr3_target_value3; + break; + case TPR_THRESHOLD: + *value = current_evmcs->tpr_threshold; + break; + case GUEST_INTERRUPTIBILITY_INFO: + *value = current_evmcs->guest_interruptibility_info; + break; + case CPU_BASED_VM_EXEC_CONTROL: + *value = current_evmcs->cpu_based_vm_exec_control; + break; + case EXCEPTION_BITMAP: + *value = current_evmcs->exception_bitmap; + break; + case VM_ENTRY_CONTROLS: + *value = current_evmcs->vm_entry_controls; + break; + case VM_ENTRY_INTR_INFO_FIELD: + *value = current_evmcs->vm_entry_intr_info_field; + break; + case VM_ENTRY_EXCEPTION_ERROR_CODE: + *value = current_evmcs->vm_entry_exception_error_code; + break; + case 
VM_ENTRY_INSTRUCTION_LEN: + *value = current_evmcs->vm_entry_instruction_len; + break; + case HOST_IA32_SYSENTER_CS: + *value = current_evmcs->host_ia32_sysenter_cs; + break; + case PIN_BASED_VM_EXEC_CONTROL: + *value = current_evmcs->pin_based_vm_exec_control; + break; + case VM_EXIT_CONTROLS: + *value = current_evmcs->vm_exit_controls; + break; + case SECONDARY_VM_EXEC_CONTROL: + *value = current_evmcs->secondary_vm_exec_control; + break; + case GUEST_ES_LIMIT: + *value = current_evmcs->guest_es_limit; + break; + case GUEST_CS_LIMIT: + *value = current_evmcs->guest_cs_limit; + break; + case GUEST_SS_LIMIT: + *value = current_evmcs->guest_ss_limit; + break; + case GUEST_DS_LIMIT: + *value = current_evmcs->guest_ds_limit; + break; + case GUEST_FS_LIMIT: + *value = current_evmcs->guest_fs_limit; + break; + case GUEST_GS_LIMIT: + *value = current_evmcs->guest_gs_limit; + break; + case GUEST_LDTR_LIMIT: + *value = current_evmcs->guest_ldtr_limit; + break; + case GUEST_TR_LIMIT: + *value = current_evmcs->guest_tr_limit; + break; + case GUEST_GDTR_LIMIT: + *value = current_evmcs->guest_gdtr_limit; + break; + case GUEST_IDTR_LIMIT: + *value = current_evmcs->guest_idtr_limit; + break; + case GUEST_ES_AR_BYTES: + *value = current_evmcs->guest_es_ar_bytes; + break; + case GUEST_CS_AR_BYTES: + *value = current_evmcs->guest_cs_ar_bytes; + break; + case GUEST_SS_AR_BYTES: + *value = current_evmcs->guest_ss_ar_bytes; + break; + case GUEST_DS_AR_BYTES: + *value = current_evmcs->guest_ds_ar_bytes; + break; + case GUEST_FS_AR_BYTES: + *value = current_evmcs->guest_fs_ar_bytes; + break; + case GUEST_GS_AR_BYTES: + *value = current_evmcs->guest_gs_ar_bytes; + break; + case GUEST_LDTR_AR_BYTES: + *value = current_evmcs->guest_ldtr_ar_bytes; + break; + case GUEST_TR_AR_BYTES: + *value = current_evmcs->guest_tr_ar_bytes; + break; + case GUEST_ACTIVITY_STATE: + *value = current_evmcs->guest_activity_state; + break; + case GUEST_SYSENTER_CS: + *value = current_evmcs->guest_sysenter_cs; + break; + case VM_INSTRUCTION_ERROR: + *value = current_evmcs->vm_instruction_error; + break; + case VM_EXIT_REASON: + *value = current_evmcs->vm_exit_reason; + break; + case VM_EXIT_INTR_INFO: + *value = current_evmcs->vm_exit_intr_info; + break; + case VM_EXIT_INTR_ERROR_CODE: + *value = current_evmcs->vm_exit_intr_error_code; + break; + case IDT_VECTORING_INFO_FIELD: + *value = current_evmcs->idt_vectoring_info_field; + break; + case IDT_VECTORING_ERROR_CODE: + *value = current_evmcs->idt_vectoring_error_code; + break; + case VM_EXIT_INSTRUCTION_LEN: + *value = current_evmcs->vm_exit_instruction_len; + break; + case VMX_INSTRUCTION_INFO: + *value = current_evmcs->vmx_instruction_info; + break; + case PAGE_FAULT_ERROR_CODE_MASK: + *value = current_evmcs->page_fault_error_code_mask; + break; + case PAGE_FAULT_ERROR_CODE_MATCH: + *value = current_evmcs->page_fault_error_code_match; + break; + case CR3_TARGET_COUNT: + *value = current_evmcs->cr3_target_count; + break; + case VM_EXIT_MSR_STORE_COUNT: + *value = current_evmcs->vm_exit_msr_store_count; + break; + case VM_EXIT_MSR_LOAD_COUNT: + *value = current_evmcs->vm_exit_msr_load_count; + break; + case VM_ENTRY_MSR_LOAD_COUNT: + *value = current_evmcs->vm_entry_msr_load_count; + break; + case HOST_ES_SELECTOR: + *value = current_evmcs->host_es_selector; + break; + case HOST_CS_SELECTOR: + *value = current_evmcs->host_cs_selector; + break; + case HOST_SS_SELECTOR: + *value = current_evmcs->host_ss_selector; + break; + case HOST_DS_SELECTOR: + *value = 
current_evmcs->host_ds_selector; + break; + case HOST_FS_SELECTOR: + *value = current_evmcs->host_fs_selector; + break; + case HOST_GS_SELECTOR: + *value = current_evmcs->host_gs_selector; + break; + case HOST_TR_SELECTOR: + *value = current_evmcs->host_tr_selector; + break; + case GUEST_ES_SELECTOR: + *value = current_evmcs->guest_es_selector; + break; + case GUEST_CS_SELECTOR: + *value = current_evmcs->guest_cs_selector; + break; + case GUEST_SS_SELECTOR: + *value = current_evmcs->guest_ss_selector; + break; + case GUEST_DS_SELECTOR: + *value = current_evmcs->guest_ds_selector; + break; + case GUEST_FS_SELECTOR: + *value = current_evmcs->guest_fs_selector; + break; + case GUEST_GS_SELECTOR: + *value = current_evmcs->guest_gs_selector; + break; + case GUEST_LDTR_SELECTOR: + *value = current_evmcs->guest_ldtr_selector; + break; + case GUEST_TR_SELECTOR: + *value = current_evmcs->guest_tr_selector; + break; + case VIRTUAL_PROCESSOR_ID: + *value = current_evmcs->virtual_processor_id; + break; + default: return 1; + } + + return 0; +} + +static inline int evmcs_vmwrite(uint64_t encoding, uint64_t value) +{ + switch (encoding) { + case GUEST_RIP: + current_evmcs->guest_rip = value; + break; + case GUEST_RSP: + current_evmcs->guest_rsp = value; + break; + case GUEST_RFLAGS: + current_evmcs->guest_rflags = value; + break; + case HOST_IA32_PAT: + current_evmcs->host_ia32_pat = value; + break; + case HOST_IA32_EFER: + current_evmcs->host_ia32_efer = value; + break; + case HOST_CR0: + current_evmcs->host_cr0 = value; + break; + case HOST_CR3: + current_evmcs->host_cr3 = value; + break; + case HOST_CR4: + current_evmcs->host_cr4 = value; + break; + case HOST_IA32_SYSENTER_ESP: + current_evmcs->host_ia32_sysenter_esp = value; + break; + case HOST_IA32_SYSENTER_EIP: + current_evmcs->host_ia32_sysenter_eip = value; + break; + case HOST_RIP: + current_evmcs->host_rip = value; + break; + case IO_BITMAP_A: + current_evmcs->io_bitmap_a = value; + break; + case IO_BITMAP_B: + current_evmcs->io_bitmap_b = value; + break; + case MSR_BITMAP: + current_evmcs->msr_bitmap = value; + break; + case GUEST_ES_BASE: + current_evmcs->guest_es_base = value; + break; + case GUEST_CS_BASE: + current_evmcs->guest_cs_base = value; + break; + case GUEST_SS_BASE: + current_evmcs->guest_ss_base = value; + break; + case GUEST_DS_BASE: + current_evmcs->guest_ds_base = value; + break; + case GUEST_FS_BASE: + current_evmcs->guest_fs_base = value; + break; + case GUEST_GS_BASE: + current_evmcs->guest_gs_base = value; + break; + case GUEST_LDTR_BASE: + current_evmcs->guest_ldtr_base = value; + break; + case GUEST_TR_BASE: + current_evmcs->guest_tr_base = value; + break; + case GUEST_GDTR_BASE: + current_evmcs->guest_gdtr_base = value; + break; + case GUEST_IDTR_BASE: + current_evmcs->guest_idtr_base = value; + break; + case TSC_OFFSET: + current_evmcs->tsc_offset = value; + break; + case VIRTUAL_APIC_PAGE_ADDR: + current_evmcs->virtual_apic_page_addr = value; + break; + case VMCS_LINK_POINTER: + current_evmcs->vmcs_link_pointer = value; + break; + case GUEST_IA32_DEBUGCTL: + current_evmcs->guest_ia32_debugctl = value; + break; + case GUEST_IA32_PAT: + current_evmcs->guest_ia32_pat = value; + break; + case GUEST_IA32_EFER: + current_evmcs->guest_ia32_efer = value; + break; + case GUEST_PDPTR0: + current_evmcs->guest_pdptr0 = value; + break; + case GUEST_PDPTR1: + current_evmcs->guest_pdptr1 = value; + break; + case GUEST_PDPTR2: + current_evmcs->guest_pdptr2 = value; + break; + case GUEST_PDPTR3: + current_evmcs->guest_pdptr3 = 
value; + break; + case GUEST_PENDING_DBG_EXCEPTIONS: + current_evmcs->guest_pending_dbg_exceptions = value; + break; + case GUEST_SYSENTER_ESP: + current_evmcs->guest_sysenter_esp = value; + break; + case GUEST_SYSENTER_EIP: + current_evmcs->guest_sysenter_eip = value; + break; + case CR0_GUEST_HOST_MASK: + current_evmcs->cr0_guest_host_mask = value; + break; + case CR4_GUEST_HOST_MASK: + current_evmcs->cr4_guest_host_mask = value; + break; + case CR0_READ_SHADOW: + current_evmcs->cr0_read_shadow = value; + break; + case CR4_READ_SHADOW: + current_evmcs->cr4_read_shadow = value; + break; + case GUEST_CR0: + current_evmcs->guest_cr0 = value; + break; + case GUEST_CR3: + current_evmcs->guest_cr3 = value; + break; + case GUEST_CR4: + current_evmcs->guest_cr4 = value; + break; + case GUEST_DR7: + current_evmcs->guest_dr7 = value; + break; + case HOST_FS_BASE: + current_evmcs->host_fs_base = value; + break; + case HOST_GS_BASE: + current_evmcs->host_gs_base = value; + break; + case HOST_TR_BASE: + current_evmcs->host_tr_base = value; + break; + case HOST_GDTR_BASE: + current_evmcs->host_gdtr_base = value; + break; + case HOST_IDTR_BASE: + current_evmcs->host_idtr_base = value; + break; + case HOST_RSP: + current_evmcs->host_rsp = value; + break; + case EPT_POINTER: + current_evmcs->ept_pointer = value; + break; + case GUEST_BNDCFGS: + current_evmcs->guest_bndcfgs = value; + break; + case XSS_EXIT_BITMAP: + current_evmcs->xss_exit_bitmap = value; + break; + case GUEST_PHYSICAL_ADDRESS: + current_evmcs->guest_physical_address = value; + break; + case EXIT_QUALIFICATION: + current_evmcs->exit_qualification = value; + break; + case GUEST_LINEAR_ADDRESS: + current_evmcs->guest_linear_address = value; + break; + case VM_EXIT_MSR_STORE_ADDR: + current_evmcs->vm_exit_msr_store_addr = value; + break; + case VM_EXIT_MSR_LOAD_ADDR: + current_evmcs->vm_exit_msr_load_addr = value; + break; + case VM_ENTRY_MSR_LOAD_ADDR: + current_evmcs->vm_entry_msr_load_addr = value; + break; + case CR3_TARGET_VALUE0: + current_evmcs->cr3_target_value0 = value; + break; + case CR3_TARGET_VALUE1: + current_evmcs->cr3_target_value1 = value; + break; + case CR3_TARGET_VALUE2: + current_evmcs->cr3_target_value2 = value; + break; + case CR3_TARGET_VALUE3: + current_evmcs->cr3_target_value3 = value; + break; + case TPR_THRESHOLD: + current_evmcs->tpr_threshold = value; + break; + case GUEST_INTERRUPTIBILITY_INFO: + current_evmcs->guest_interruptibility_info = value; + break; + case CPU_BASED_VM_EXEC_CONTROL: + current_evmcs->cpu_based_vm_exec_control = value; + break; + case EXCEPTION_BITMAP: + current_evmcs->exception_bitmap = value; + break; + case VM_ENTRY_CONTROLS: + current_evmcs->vm_entry_controls = value; + break; + case VM_ENTRY_INTR_INFO_FIELD: + current_evmcs->vm_entry_intr_info_field = value; + break; + case VM_ENTRY_EXCEPTION_ERROR_CODE: + current_evmcs->vm_entry_exception_error_code = value; + break; + case VM_ENTRY_INSTRUCTION_LEN: + current_evmcs->vm_entry_instruction_len = value; + break; + case HOST_IA32_SYSENTER_CS: + current_evmcs->host_ia32_sysenter_cs = value; + break; + case PIN_BASED_VM_EXEC_CONTROL: + current_evmcs->pin_based_vm_exec_control = value; + break; + case VM_EXIT_CONTROLS: + current_evmcs->vm_exit_controls = value; + break; + case SECONDARY_VM_EXEC_CONTROL: + current_evmcs->secondary_vm_exec_control = value; + break; + case GUEST_ES_LIMIT: + current_evmcs->guest_es_limit = value; + break; + case GUEST_CS_LIMIT: + current_evmcs->guest_cs_limit = value; + break; + case GUEST_SS_LIMIT: + 
current_evmcs->guest_ss_limit = value; + break; + case GUEST_DS_LIMIT: + current_evmcs->guest_ds_limit = value; + break; + case GUEST_FS_LIMIT: + current_evmcs->guest_fs_limit = value; + break; + case GUEST_GS_LIMIT: + current_evmcs->guest_gs_limit = value; + break; + case GUEST_LDTR_LIMIT: + current_evmcs->guest_ldtr_limit = value; + break; + case GUEST_TR_LIMIT: + current_evmcs->guest_tr_limit = value; + break; + case GUEST_GDTR_LIMIT: + current_evmcs->guest_gdtr_limit = value; + break; + case GUEST_IDTR_LIMIT: + current_evmcs->guest_idtr_limit = value; + break; + case GUEST_ES_AR_BYTES: + current_evmcs->guest_es_ar_bytes = value; + break; + case GUEST_CS_AR_BYTES: + current_evmcs->guest_cs_ar_bytes = value; + break; + case GUEST_SS_AR_BYTES: + current_evmcs->guest_ss_ar_bytes = value; + break; + case GUEST_DS_AR_BYTES: + current_evmcs->guest_ds_ar_bytes = value; + break; + case GUEST_FS_AR_BYTES: + current_evmcs->guest_fs_ar_bytes = value; + break; + case GUEST_GS_AR_BYTES: + current_evmcs->guest_gs_ar_bytes = value; + break; + case GUEST_LDTR_AR_BYTES: + current_evmcs->guest_ldtr_ar_bytes = value; + break; + case GUEST_TR_AR_BYTES: + current_evmcs->guest_tr_ar_bytes = value; + break; + case GUEST_ACTIVITY_STATE: + current_evmcs->guest_activity_state = value; + break; + case GUEST_SYSENTER_CS: + current_evmcs->guest_sysenter_cs = value; + break; + case VM_INSTRUCTION_ERROR: + current_evmcs->vm_instruction_error = value; + break; + case VM_EXIT_REASON: + current_evmcs->vm_exit_reason = value; + break; + case VM_EXIT_INTR_INFO: + current_evmcs->vm_exit_intr_info = value; + break; + case VM_EXIT_INTR_ERROR_CODE: + current_evmcs->vm_exit_intr_error_code = value; + break; + case IDT_VECTORING_INFO_FIELD: + current_evmcs->idt_vectoring_info_field = value; + break; + case IDT_VECTORING_ERROR_CODE: + current_evmcs->idt_vectoring_error_code = value; + break; + case VM_EXIT_INSTRUCTION_LEN: + current_evmcs->vm_exit_instruction_len = value; + break; + case VMX_INSTRUCTION_INFO: + current_evmcs->vmx_instruction_info = value; + break; + case PAGE_FAULT_ERROR_CODE_MASK: + current_evmcs->page_fault_error_code_mask = value; + break; + case PAGE_FAULT_ERROR_CODE_MATCH: + current_evmcs->page_fault_error_code_match = value; + break; + case CR3_TARGET_COUNT: + current_evmcs->cr3_target_count = value; + break; + case VM_EXIT_MSR_STORE_COUNT: + current_evmcs->vm_exit_msr_store_count = value; + break; + case VM_EXIT_MSR_LOAD_COUNT: + current_evmcs->vm_exit_msr_load_count = value; + break; + case VM_ENTRY_MSR_LOAD_COUNT: + current_evmcs->vm_entry_msr_load_count = value; + break; + case HOST_ES_SELECTOR: + current_evmcs->host_es_selector = value; + break; + case HOST_CS_SELECTOR: + current_evmcs->host_cs_selector = value; + break; + case HOST_SS_SELECTOR: + current_evmcs->host_ss_selector = value; + break; + case HOST_DS_SELECTOR: + current_evmcs->host_ds_selector = value; + break; + case HOST_FS_SELECTOR: + current_evmcs->host_fs_selector = value; + break; + case HOST_GS_SELECTOR: + current_evmcs->host_gs_selector = value; + break; + case HOST_TR_SELECTOR: + current_evmcs->host_tr_selector = value; + break; + case GUEST_ES_SELECTOR: + current_evmcs->guest_es_selector = value; + break; + case GUEST_CS_SELECTOR: + current_evmcs->guest_cs_selector = value; + break; + case GUEST_SS_SELECTOR: + current_evmcs->guest_ss_selector = value; + break; + case GUEST_DS_SELECTOR: + current_evmcs->guest_ds_selector = value; + break; + case GUEST_FS_SELECTOR: + current_evmcs->guest_fs_selector = value; + break; + case 
GUEST_GS_SELECTOR: + current_evmcs->guest_gs_selector = value; + break; + case GUEST_LDTR_SELECTOR: + current_evmcs->guest_ldtr_selector = value; + break; + case GUEST_TR_SELECTOR: + current_evmcs->guest_tr_selector = value; + break; + case VIRTUAL_PROCESSOR_ID: + current_evmcs->virtual_processor_id = value; + break; + default: return 1; + } + + return 0; +} + +static inline int evmcs_vmlaunch(void) +{ + int ret; + + current_evmcs->hv_clean_fields = 0; + + __asm__ __volatile__("push %%rbp;" + "push %%rcx;" + "push %%rdx;" + "push %%rsi;" + "push %%rdi;" + "push $0;" + "mov %%rsp, (%[host_rsp]);" + "lea 1f(%%rip), %%rax;" + "mov %%rax, (%[host_rip]);" + "vmlaunch;" + "incq (%%rsp);" + "1: pop %%rax;" + "pop %%rdi;" + "pop %%rsi;" + "pop %%rdx;" + "pop %%rcx;" + "pop %%rbp;" + : [ret]"=&a"(ret) + : [host_rsp]"r" + ((uint64_t)&current_evmcs->host_rsp), + [host_rip]"r" + ((uint64_t)&current_evmcs->host_rip) + : "memory", "cc", "rbx", "r8", "r9", "r10", + "r11", "r12", "r13", "r14", "r15"); + return ret; +} + +/* + * No guest state (e.g. GPRs) is established by this vmresume. + */ +static inline int evmcs_vmresume(void) +{ + int ret; + + current_evmcs->hv_clean_fields = 0; + + __asm__ __volatile__("push %%rbp;" + "push %%rcx;" + "push %%rdx;" + "push %%rsi;" + "push %%rdi;" + "push $0;" + "mov %%rsp, (%[host_rsp]);" + "lea 1f(%%rip), %%rax;" + "mov %%rax, (%[host_rip]);" + "vmresume;" + "incq (%%rsp);" + "1: pop %%rax;" + "pop %%rdi;" + "pop %%rsi;" + "pop %%rdx;" + "pop %%rcx;" + "pop %%rbp;" + : [ret]"=&a"(ret) + : [host_rsp]"r" + ((uint64_t)&current_evmcs->host_rsp), + [host_rip]"r" + ((uint64_t)&current_evmcs->host_rip) + : "memory", "cc", "rbx", "r8", "r9", "r10", + "r11", "r12", "r13", "r14", "r15"); + return ret; +} + +#endif /* !SELFTEST_KVM_EVMCS_H */ diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h index 4bbee8560292..c9bd935b939c 100644 --- a/tools/testing/selftests/kvm/include/x86_64/vmx.h +++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h @@ -339,6 +339,8 @@ struct vmx_msr_entry { uint64_t value; } __attribute__ ((aligned(16))); +#include "evmcs.h" + static inline int vmxon(uint64_t phys) { uint8_t ret; @@ -372,6 +374,9 @@ static inline int vmptrld(uint64_t vmcs_pa) { uint8_t ret; + if (enable_evmcs) + return -1; + __asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]" : [ret]"=rm"(ret) : [pa]"m"(vmcs_pa) @@ -385,6 +390,9 @@ static inline int vmptrst(uint64_t *value) { uint64_t tmp; uint8_t ret; + if (enable_evmcs) + return evmcs_vmptrst(value); + __asm__ __volatile__("vmptrst %[value]; setna %[ret]" : [value]"=m"(tmp), [ret]"=rm"(ret) : : "cc", "memory"); @@ -411,6 +419,9 @@ static inline int vmlaunch(void) { int ret; + if (enable_evmcs) + return evmcs_vmlaunch(); + __asm__ __volatile__("push %%rbp;" "push %%rcx;" "push %%rdx;" @@ -443,6 +454,9 @@ static inline int vmresume(void) { int ret; + if (enable_evmcs) + return evmcs_vmresume(); + __asm__ __volatile__("push %%rbp;" "push %%rcx;" "push %%rdx;" @@ -482,6 +496,9 @@ static inline int vmread(uint64_t encoding, uint64_t *value) { uint64_t tmp; uint8_t ret; + if (enable_evmcs) + return evmcs_vmread(encoding, value); + __asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]" : [value]"=rm"(tmp), [ret]"=rm"(ret) : [encoding]"r"(encoding) @@ -506,6 +523,9 @@ static inline int vmwrite(uint64_t encoding, uint64_t value) { uint8_t ret; + if (enable_evmcs) + return evmcs_vmwrite(encoding, value); + __asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]" : [ret]"=rm"(ret) :
[value]"rm"(value), [encoding]"r"(encoding) @@ -543,6 +563,14 @@ struct vmx_pages { void *vmwrite_hva; uint64_t vmwrite_gpa; void *vmwrite; + + void *vp_assist_hva; + uint64_t vp_assist_gpa; + void *vp_assist; + + void *enlightened_vmcs_hva; + uint64_t enlightened_vmcs_gpa; + void *enlightened_vmcs; }; struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva); diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c index cc356da9b3d8..771ba6bf751c 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c +++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c @@ -13,6 +13,8 @@ #include "processor.h" #include "vmx.h" +bool enable_evmcs; + /* Allocate memory regions for nested VMX tests. * * Input Args: @@ -62,6 +64,20 @@ vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva) vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite); memset(vmx->vmwrite_hva, 0, getpagesize()); + /* Setup of a region of guest memory for the VP Assist page. */ + vmx->vp_assist = (void *)vm_vaddr_alloc(vm, getpagesize(), + 0x10000, 0, 0); + vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist); + vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist); + + /* Setup of a region of guest memory for the enlightened VMCS. */ + vmx->enlightened_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), + 0x10000, 0, 0); + vmx->enlightened_vmcs_hva = + addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs); + vmx->enlightened_vmcs_gpa = + addr_gva2gpa(vm, (uintptr_t)vmx->enlightened_vmcs); + *p_vmx_gva = vmx_gva; return vmx; } @@ -112,18 +128,26 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx) bool load_vmcs(struct vmx_pages *vmx) { - /* Load a VMCS. */ - *(uint32_t *)(vmx->vmcs) = vmcs_revision(); - if (vmclear(vmx->vmcs_gpa)) - return false; + if (!enable_evmcs) { + /* Load a VMCS. */ + *(uint32_t *)(vmx->vmcs) = vmcs_revision(); + if (vmclear(vmx->vmcs_gpa)) + return false; - if (vmptrld(vmx->vmcs_gpa)) - return false; + if (vmptrld(vmx->vmcs_gpa)) + return false; - /* Setup shadow VMCS, do not load it yet. */ - *(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul; - if (vmclear(vmx->shadow_vmcs_gpa)) - return false; + /* Setup shadow VMCS, do not load it yet. */ + *(uint32_t *)(vmx->shadow_vmcs) = + vmcs_revision() | 0x80000000ul; + if (vmclear(vmx->shadow_vmcs_gpa)) + return false; + } else { + if (evmcs_vmptrld(vmx->enlightened_vmcs_gpa, + vmx->enlightened_vmcs)) + return false; + current_evmcs->revision_id = vmcs_revision(); + } return true; } diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c new file mode 100644 index 000000000000..92c2cfd1b182 --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018, Red Hat, Inc. + * + * Tests for Enlightened VMCS, including nested guest state. + */ +#define _GNU_SOURCE /* for program_invocation_short_name */ +#include +#include +#include +#include +#include + +#include "test_util.h" + +#include "kvm_util.h" + +#include "vmx.h" + +#define VCPU_ID 5 + +static bool have_nested_state; + +void l2_guest_code(void) +{ + GUEST_SYNC(6); + + GUEST_SYNC(7); + + /* Done, exit to L1 and never come back. 
*/ + vmcall(); +} + +void l1_guest_code(struct vmx_pages *vmx_pages) +{ +#define L2_GUEST_STACK_SIZE 64 + unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; + + enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist); + + GUEST_ASSERT(vmx_pages->vmcs_gpa); + GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); + GUEST_SYNC(3); + GUEST_ASSERT(load_vmcs(vmx_pages)); + GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa); + + GUEST_SYNC(4); + GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa); + + prepare_vmcs(vmx_pages, l2_guest_code, + &l2_guest_stack[L2_GUEST_STACK_SIZE]); + + GUEST_SYNC(5); + GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa); + GUEST_ASSERT(!vmlaunch()); + GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa); + GUEST_SYNC(8); + GUEST_ASSERT(!vmresume()); + GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); + GUEST_SYNC(9); +} + +void guest_code(struct vmx_pages *vmx_pages) +{ + GUEST_SYNC(1); + GUEST_SYNC(2); + + if (vmx_pages) + l1_guest_code(vmx_pages); + + GUEST_DONE(); +} + +int main(int argc, char *argv[]) +{ + struct vmx_pages *vmx_pages = NULL; + vm_vaddr_t vmx_pages_gva = 0; + + struct kvm_regs regs1, regs2; + struct kvm_vm *vm; + struct kvm_run *run; + struct kvm_x86_state *state; + struct ucall uc; + int stage; + uint16_t evmcs_ver; + struct kvm_enable_cap enable_evmcs_cap = { + .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS, + .args[0] = (unsigned long)&evmcs_ver + }; + + struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); + + /* Create VM */ + vm = vm_create_default(VCPU_ID, 0, guest_code); + + vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); + + if (!kvm_check_cap(KVM_CAP_NESTED_STATE) || + !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) { + printf("capabilities not available, skipping test\n"); + exit(KSFT_SKIP); + } + + vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); + + run = vcpu_state(vm, VCPU_ID); + + vcpu_regs_get(vm, VCPU_ID, &regs1); + + vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva); + vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); + + for (stage = 1;; stage++) { + _vcpu_run(vm, VCPU_ID); + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Unexpected exit reason: %u (%s),\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); + + memset(&regs1, 0, sizeof(regs1)); + vcpu_regs_get(vm, VCPU_ID, &regs1); + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_ABORT: + TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0], + __FILE__, uc.args[1]); + /* NOT REACHED */ + case UCALL_SYNC: + break; + case UCALL_DONE: + goto done; + default: + TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd); + } + + /* UCALL_SYNC is handled here. */ + TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") && + uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx", + stage, (ulong)uc.args[1]); + + state = vcpu_save_state(vm, VCPU_ID); + kvm_vm_release(vm); + + /* Restore state in a new VM. 
*/ + kvm_vm_restart(vm, O_RDWR); + vm_vcpu_add(vm, VCPU_ID, 0, 0); + vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); + vcpu_load_state(vm, VCPU_ID, state); + run = vcpu_state(vm, VCPU_ID); + free(state); + + memset(&regs2, 0, sizeof(regs2)); + vcpu_regs_get(vm, VCPU_ID, &regs2); + TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)), + "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx", + (ulong) regs2.rdi, (ulong) regs2.rsi); + } + +done: + kvm_vm_free(vm); +} From bba9ce58d9cb7ba9e121627108eca986760ad0e8 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Tue, 16 Oct 2018 14:29:18 -0700 Subject: [PATCH 186/204] KVM: Documentation: Fix omission in struct kvm_vcpu_events The header file indicates that there are 36 reserved bytes at the end of this structure. Adjust the documentation to agree with the header file. Signed-off-by: Jim Mattson Signed-off-by: Paolo Bonzini --- Documentation/virtual/kvm/api.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 70f9c8bb1840..cf9422f4f101 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -873,6 +873,7 @@ struct kvm_vcpu_events { __u8 smm_inside_nmi; __u8 latched_init; } smi; + __u32 reserved[9]; }; Only two fields are defined in the flags field: From c851436a34cad09388f1303e11ccb6b9420e5692 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Tue, 16 Oct 2018 14:29:19 -0700 Subject: [PATCH 187/204] kvm: x86: Add has_payload and payload to kvm_queued_exception The payload associated with a #PF exception is the linear address of the fault to be loaded into CR2 when the fault is delivered. The payload associated with a #DB exception is a mask of the DR6 bits to be set (or in the case of DR6.RTM, cleared) when the fault is delivered. Add fields has_payload and payload to kvm_queued_exception to track payloads for pending exceptions. The new fields are introduced here, but for now, they are just cleared. 
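A minimal sketch of how such a payload is meant to be consumed at delivery
time; this is illustrative only and is not the kernel implementation (the
actual helper, kvm_deliver_exception_payload(), is added by a later patch in
this series):

static void deliver_payload_sketch(struct kvm_vcpu *vcpu)
{
	unsigned long payload = vcpu->arch.exception.payload;

	if (!vcpu->arch.exception.has_payload)
		return;

	switch (vcpu->arch.exception.nr) {
	case PF_VECTOR:
		/* #PF: the payload is the faulting linear address -> CR2 */
		vcpu->arch.cr2 = payload;
		break;
	case DB_VECTOR:
		/* #DB: the payload is the mask of DR6 bits to set ... */
		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
		vcpu->arch.dr6 |= DR6_RTM;
		vcpu->arch.dr6 |= payload;
		/* ... except DR6.RTM, which the payload clears (inverted) */
		vcpu->arch.dr6 ^= payload & DR6_RTM;
		break;
	}

	vcpu->arch.exception.has_payload = false;
	vcpu->arch.exception.payload = 0;
}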
Reported-by: Jim Mattson Suggested-by: Paolo Bonzini Signed-off-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/x86.c | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 258fc2c85301..20f7c994afeb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -628,6 +628,8 @@ struct kvm_vcpu_arch { bool has_error_code; u8 nr; u32 error_code; + unsigned long payload; + bool has_payload; u8 nested_apf; } exception; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0fe1e3762e82..532b660fe497 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -431,6 +431,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, vcpu->arch.exception.has_error_code = has_error; vcpu->arch.exception.nr = nr; vcpu->arch.exception.error_code = error_code; + vcpu->arch.exception.has_payload = false; + vcpu->arch.exception.payload = 0; return; } @@ -455,6 +457,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, vcpu->arch.exception.has_error_code = true; vcpu->arch.exception.nr = DF_VECTOR; vcpu->arch.exception.error_code = 0; + vcpu->arch.exception.has_payload = false; + vcpu->arch.exception.payload = 0; } else /* replace previous exception with a new one in a hope that instruction re-execution will regenerate lost @@ -3436,6 +3440,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.exception.nr = events->exception.nr; vcpu->arch.exception.has_error_code = events->exception.has_error_code; vcpu->arch.exception.error_code = events->exception.error_code; + vcpu->arch.exception.has_payload = false; + vcpu->arch.exception.payload = 0; vcpu->arch.interrupt.injected = events->interrupt.injected; vcpu->arch.interrupt.nr = events->interrupt.nr; @@ -9486,6 +9492,8 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, vcpu->arch.exception.nr = 0; vcpu->arch.exception.has_error_code = false; vcpu->arch.exception.error_code = 0; + vcpu->arch.exception.has_payload = false; + vcpu->arch.exception.payload = 0; } else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; From 59073aaf6de0d2dacc2603cee6d1d6cd5592ac08 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Tue, 16 Oct 2018 14:29:20 -0700 Subject: [PATCH 188/204] kvm: x86: Add exception payload fields to kvm_vcpu_events The per-VM capability KVM_CAP_EXCEPTION_PAYLOAD (to be introduced in a later commit) adds the following fields to struct kvm_vcpu_events: exception_has_payload, exception_payload, and exception.pending. With this capability set, all of the details of vcpu->arch.exception, including the payload for a pending exception, are reported to userspace in response to KVM_GET_VCPU_EVENTS. With this capability clear, the original ABI is preserved, and the exception.injected field is set for either pending or injected exceptions. When userspace calls KVM_SET_VCPU_EVENTS with KVM_CAP_EXCEPTION_PAYLOAD clear, exception.injected is no longer translated to exception.pending. KVM_SET_VCPU_EVENTS can now only establish a pending exception when KVM_CAP_EXCEPTION_PAYLOAD is set. 
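A minimal userspace sketch of the intended flow (illustrative only, not taken
from any real VMM: vm_fd and vcpu_fd are assumed to be already-open VM and
vCPU file descriptors, error handling is reduced to -1 returns, the updated
uapi header from this series is required, and KVM_CAP_EXCEPTION_PAYLOAD is the
per-VM capability introduced later in this series):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Queue a pending #PF (vector 14) whose payload is the faulting address. */
static int set_pending_pf(int vm_fd, int vcpu_fd, __u64 fault_addr,
			  __u32 error_code)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_EXCEPTION_PAYLOAD,
		.args[0] = 1,
	};
	struct kvm_vcpu_events events;

	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
		return -1;
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
	events.exception.pending = 1;		/* pending, not yet injected */
	events.exception.nr = 14;		/* #PF */
	events.exception.has_error_code = 1;
	events.exception.error_code = error_code;
	events.exception_has_payload = 1;
	events.exception_payload = fault_addr;	/* loaded into CR2 at delivery */

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}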
Reported-by: Jim Mattson Suggested-by: Paolo Bonzini Signed-off-by: Jim Mattson Signed-off-by: Paolo Bonzini --- Documentation/virtual/kvm/api.txt | 24 ++++++++--- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/include/uapi/asm/kvm.h | 7 ++- arch/x86/kvm/x86.c | 61 ++++++++++++++++++++------- tools/arch/x86/include/uapi/asm/kvm.h | 10 ++++- 5 files changed, 77 insertions(+), 26 deletions(-) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index cf9422f4f101..e900ac31501c 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -850,7 +850,7 @@ struct kvm_vcpu_events { __u8 injected; __u8 nr; __u8 has_error_code; - __u8 pad; + __u8 pending; __u32 error_code; } exception; struct { @@ -873,16 +873,23 @@ struct kvm_vcpu_events { __u8 smm_inside_nmi; __u8 latched_init; } smi; - __u32 reserved[9]; + __u8 reserved[27]; + __u8 exception_has_payload; + __u64 exception_payload; }; -Only two fields are defined in the flags field: +The following bits are defined in the flags field: -- KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that +- KVM_VCPUEVENT_VALID_SHADOW may be set to signal that interrupt.shadow contains a valid state. -- KVM_VCPUEVENT_VALID_SMM may be set in the flags field to signal that - smi contains a valid state. +- KVM_VCPUEVENT_VALID_SMM may be set to signal that smi contains a + valid state. + +- KVM_VCPUEVENT_VALID_PAYLOAD may be set to signal that the + exception_has_payload, exception_payload, and exception.pending + fields contain a valid state. This bit will be set whenever + KVM_CAP_EXCEPTION_PAYLOAD is enabled. ARM/ARM64: @@ -962,6 +969,11 @@ shall be written into the VCPU. KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available. +If KVM_CAP_EXCEPTION_PAYLOAD is enabled, KVM_VCPUEVENT_VALID_PAYLOAD +can be set in the flags field to signal that the +exception_has_payload, exception_payload, and exception.pending fields +contain a valid state and shall be written into the VCPU. + ARM/ARM64: Set the pending SError exception state for this VCPU. 
It is not possible to diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 20f7c994afeb..55e51ff7e421 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -919,6 +919,7 @@ struct kvm_arch { bool x2apic_broadcast_quirk_disabled; bool guest_can_read_msr_platform_info; + bool exception_payload_enabled; }; struct kvm_vm_stat { diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index ab76aa1d3a4d..dabfcf7c3941 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -288,6 +288,7 @@ struct kvm_reinject_control { #define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002 #define KVM_VCPUEVENT_VALID_SHADOW 0x00000004 #define KVM_VCPUEVENT_VALID_SMM 0x00000008 +#define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010 /* Interrupt shadow states */ #define KVM_X86_SHADOW_INT_MOV_SS 0x01 @@ -299,7 +300,7 @@ struct kvm_vcpu_events { __u8 injected; __u8 nr; __u8 has_error_code; - __u8 pad; + __u8 pending; __u32 error_code; } exception; struct { @@ -322,7 +323,9 @@ struct kvm_vcpu_events { __u8 smm_inside_nmi; __u8 latched_init; } smi; - __u32 reserved[9]; + __u8 reserved[27]; + __u8 exception_has_payload; + __u64 exception_payload; }; /* for KVM_GET/SET_DEBUGREGS */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 532b660fe497..2ef706574589 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3373,19 +3373,33 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { process_nmi(vcpu); + /* - * FIXME: pass injected and pending separately. This is only - * needed for nested virtualization, whose state cannot be - * migrated yet. For now we can combine them. + * The API doesn't provide the instruction length for software + * exceptions, so don't report them. As long as the guest RIP + * isn't advanced, we should expect to encounter the exception + * again. */ - events->exception.injected = - (vcpu->arch.exception.pending || - vcpu->arch.exception.injected) && - !kvm_exception_is_soft(vcpu->arch.exception.nr); + if (kvm_exception_is_soft(vcpu->arch.exception.nr)) { + events->exception.injected = 0; + events->exception.pending = 0; + } else { + events->exception.injected = vcpu->arch.exception.injected; + events->exception.pending = vcpu->arch.exception.pending; + /* + * For ABI compatibility, deliberately conflate + * pending and injected exceptions when + * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled. 
+ */ + if (!vcpu->kvm->arch.exception_payload_enabled) + events->exception.injected |= + vcpu->arch.exception.pending; + } events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; - events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; + events->exception_has_payload = vcpu->arch.exception.has_payload; + events->exception_payload = vcpu->arch.exception.payload; events->interrupt.injected = vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; @@ -3409,6 +3423,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SHADOW | KVM_VCPUEVENT_VALID_SMM); + if (vcpu->kvm->arch.exception_payload_enabled) + events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; + memset(&events->reserved, 0, sizeof(events->reserved)); } @@ -3420,12 +3437,24 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW - | KVM_VCPUEVENT_VALID_SMM)) + | KVM_VCPUEVENT_VALID_SMM + | KVM_VCPUEVENT_VALID_PAYLOAD)) return -EINVAL; - if (events->exception.injected && - (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR || - is_guest_mode(vcpu))) + if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { + if (!vcpu->kvm->arch.exception_payload_enabled) + return -EINVAL; + if (events->exception.pending) + events->exception.injected = 0; + else + events->exception_has_payload = 0; + } else { + events->exception.pending = 0; + events->exception_has_payload = 0; + } + + if ((events->exception.injected || events->exception.pending) && + (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) return -EINVAL; /* INITs are latched while in SMM */ @@ -3435,13 +3464,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, return -EINVAL; process_nmi(vcpu); - vcpu->arch.exception.injected = false; - vcpu->arch.exception.pending = events->exception.injected; + vcpu->arch.exception.injected = events->exception.injected; + vcpu->arch.exception.pending = events->exception.pending; vcpu->arch.exception.nr = events->exception.nr; vcpu->arch.exception.has_error_code = events->exception.has_error_code; vcpu->arch.exception.error_code = events->exception.error_code; - vcpu->arch.exception.has_payload = false; - vcpu->arch.exception.payload = 0; + vcpu->arch.exception.has_payload = events->exception_has_payload; + vcpu->arch.exception.payload = events->exception_payload; vcpu->arch.interrupt.injected = events->interrupt.injected; vcpu->arch.interrupt.nr = events->interrupt.nr; diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index 86299efa804a..b09875f580d5 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -288,6 +288,7 @@ struct kvm_reinject_control { #define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002 #define KVM_VCPUEVENT_VALID_SHADOW 0x00000004 #define KVM_VCPUEVENT_VALID_SMM 0x00000008 +#define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010 /* Interrupt shadow states */ #define KVM_X86_SHADOW_INT_MOV_SS 0x01 @@ -299,7 +300,10 @@ struct kvm_vcpu_events { __u8 injected; __u8 nr; __u8 has_error_code; - __u8 pad; + union { + __u8 pad; + __u8 pending; + }; __u32 error_code; } exception; struct { @@ -322,7 +326,9 @@ struct kvm_vcpu_events { __u8 smm_inside_nmi; __u8 latched_init; } smi; - __u32 reserved[9]; + __u8 
reserved[27]; + __u8 exception_has_payload; + __u64 exception_payload; }; /* for KVM_GET/SET_DEBUGREGS */ From 91e86d225ef3da80d33a8fd7695316c31c0810c9 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Tue, 16 Oct 2018 14:29:21 -0700 Subject: [PATCH 189/204] kvm: x86: Add payload operands to kvm_multiple_exception kvm_multiple_exception now takes two additional operands: has_payload and payload, so that updates to CR2 (and DR6 under VMX) can be delayed until the exception is delivered. This is necessary to properly emulate VMX or SVM hardware behavior for nested virtualization. The new behavior is triggered by vcpu->kvm->arch.exception_payload_enabled, which will (later) be set by a new per-VM capability, KVM_CAP_EXCEPTION_PAYLOAD. Reported-by: Jim Mattson Suggested-by: Paolo Bonzini Signed-off-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2ef706574589..25a2bac920bb 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -402,7 +402,7 @@ static int exception_type(int vector) static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error, u32 error_code, - bool reinject) + bool has_payload, unsigned long payload, bool reinject) { u32 prev_nr; int class1, class2; @@ -424,6 +424,14 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, */ WARN_ON_ONCE(vcpu->arch.exception.pending); vcpu->arch.exception.injected = true; + if (WARN_ON_ONCE(has_payload)) { + /* + * A reinjected event has already + * delivered its payload. + */ + has_payload = false; + payload = 0; + } } else { vcpu->arch.exception.pending = true; vcpu->arch.exception.injected = false; @@ -431,8 +439,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, vcpu->arch.exception.has_error_code = has_error; vcpu->arch.exception.nr = nr; vcpu->arch.exception.error_code = error_code; - vcpu->arch.exception.has_payload = false; - vcpu->arch.exception.payload = 0; + vcpu->arch.exception.has_payload = has_payload; + vcpu->arch.exception.payload = payload; return; } @@ -468,13 +476,13 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) { - kvm_multiple_exception(vcpu, nr, false, 0, false); + kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception); void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) { - kvm_multiple_exception(vcpu, nr, false, 0, true); + kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception); @@ -521,13 +529,13 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi); void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { - kvm_multiple_exception(vcpu, nr, true, error_code, false); + kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception_e); void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { - kvm_multiple_exception(vcpu, nr, true, error_code, true); + kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); From da998b46d244767505e41d050dcac5e4d03ba96f Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Tue, 16 Oct 2018 14:29:22 -0700 Subject: [PATCH 190/204] kvm: x86: Defer setting of CR2 until #PF delivery When exception payloads are enabled by userspace (which is not yet possible) and a #PF is raised in L2, 
defer the setting of CR2 until the #PF is delivered. This allows the L1 hypervisor to intercept the fault before CR2 is modified. For backwards compatibility, when exception payloads are not enabled by userspace, kvm_multiple_exception modifies CR2 when the #PF exception is raised. Reported-by: Jim Mattson Suggested-by: Paolo Bonzini Signed-off-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 13 ++++++------ arch/x86/kvm/vmx.c | 19 +++++++++--------- arch/x86/kvm/x86.c | 49 ++++++++++++++++++++++++++++++++++++++++++---- arch/x86/kvm/x86.h | 2 ++ 4 files changed, 62 insertions(+), 21 deletions(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 47b07211c5b1..1086bebaeea3 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -805,6 +805,8 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu) nested_svm_check_exception(svm, nr, has_error_code, error_code)) return; + kvm_deliver_exception_payload(&svm->vcpu); + if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) { unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); @@ -2965,16 +2967,13 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, svm->vmcb->control.exit_info_1 = error_code; /* - * FIXME: we should not write CR2 when L1 intercepts an L2 #PF exception. - * The fix is to add the ancillary datum (CR2 or DR6) to structs - * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 can be - * written only when inject_pending_event runs (DR6 would written here - * too). This should be conditional on a new capability---if the - * capability is disabled, kvm_multiple_exception would write the - * ancillary information to CR2 or DR6, for backwards ABI-compatibility. + * EXITINFO2 is undefined for all exception intercepts other + * than #PF. */ if (svm->vcpu.arch.exception.nested_apf) svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token; + else if (svm->vcpu.arch.exception.has_payload) + svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload; else svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 8d7c60faaacd..0c7efb1de29d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3292,27 +3292,24 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned int nr = vcpu->arch.exception.nr; + bool has_payload = vcpu->arch.exception.has_payload; + unsigned long payload = vcpu->arch.exception.payload; if (nr == PF_VECTOR) { if (vcpu->arch.exception.nested_apf) { *exit_qual = vcpu->arch.apf.nested_apf_token; return 1; } - /* - * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception. - * The fix is to add the ancillary datum (CR2 or DR6) to structs - * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 - * can be written only when inject_pending_event runs. This should be - * conditional on a new capability---if the capability is disabled, - * kvm_multiple_exception would write the ancillary information to - * CR2 or DR6, for backwards ABI-compatibility. - */ if (nested_vmx_is_page_fault_vmexit(vmcs12, vcpu->arch.exception.error_code)) { - *exit_qual = vcpu->arch.cr2; + *exit_qual = has_payload ? payload : vcpu->arch.cr2; return 1; } } else { + /* + * FIXME: we must not write DR6 when L1 intercepts an + * L2 #DB exception. 
+ */ if (vmcs12->exception_bitmap & (1u << nr)) { if (nr == DB_VECTOR) { *exit_qual = vcpu->arch.dr6; @@ -3349,6 +3346,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu) u32 error_code = vcpu->arch.exception.error_code; u32 intr_info = nr | INTR_INFO_VALID_MASK; + kvm_deliver_exception_payload(vcpu); + if (has_error_code) { vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); intr_info |= INTR_INFO_DELIVER_CODE_MASK; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 25a2bac920bb..4e9536c335d6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -400,6 +400,26 @@ static int exception_type(int vector) return EXCPT_FAULT; } +void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu) +{ + unsigned nr = vcpu->arch.exception.nr; + bool has_payload = vcpu->arch.exception.has_payload; + unsigned long payload = vcpu->arch.exception.payload; + + if (!has_payload) + return; + + switch (nr) { + case PF_VECTOR: + vcpu->arch.cr2 = payload; + break; + } + + vcpu->arch.exception.has_payload = false; + vcpu->arch.exception.payload = 0; +} +EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload); + static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error, u32 error_code, bool has_payload, unsigned long payload, bool reinject) @@ -441,6 +461,18 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, vcpu->arch.exception.error_code = error_code; vcpu->arch.exception.has_payload = has_payload; vcpu->arch.exception.payload = payload; + /* + * In guest mode, payload delivery should be deferred, + * so that the L1 hypervisor can intercept #PF before + * CR2 is modified. However, for ABI compatibility + * with KVM_GET_VCPU_EVENTS and KVM_SET_VCPU_EVENTS, + * we can't delay payload delivery unless userspace + * has enabled this functionality via the per-VM + * capability, KVM_CAP_EXCEPTION_PAYLOAD. 
+ */ + if (!vcpu->kvm->arch.exception_payload_enabled || + !is_guest_mode(vcpu)) + kvm_deliver_exception_payload(vcpu); return; } @@ -486,6 +518,13 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) } EXPORT_SYMBOL_GPL(kvm_requeue_exception); +static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr, + u32 error_code, unsigned long payload) +{ + kvm_multiple_exception(vcpu, nr, true, error_code, + true, payload, false); +} + int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) { if (err) @@ -502,11 +541,13 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) ++vcpu->stat.pf_guest; vcpu->arch.exception.nested_apf = is_guest_mode(vcpu) && fault->async_page_fault; - if (vcpu->arch.exception.nested_apf) + if (vcpu->arch.exception.nested_apf) { vcpu->arch.apf.nested_apf_token = fault->address; - else - vcpu->arch.cr2 = fault->address; - kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); + kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); + } else { + kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, + fault->address); + } } EXPORT_SYMBOL_GPL(kvm_inject_page_fault); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 67b9568613f3..224cd0a47568 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -266,6 +266,8 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, int handle_ud(struct kvm_vcpu *vcpu); +void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu); + void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu); u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn); bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data); From f10c729ff965283d2086aa03d139dcf82da86a96 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Tue, 16 Oct 2018 14:29:23 -0700 Subject: [PATCH 191/204] kvm: vmx: Defer setting of DR6 until #DB delivery When exception payloads are enabled by userspace (which is not yet possible) and a #DB is raised in L2, defer the setting of DR6 until later. Under VMX, this allows the L1 hypervisor to intercept the fault before DR6 is modified. Under SVM, DR6 is modified before L1 can intercept the fault (as has always been the case with DR7). Note that the payload associated with a #DB exception includes only the "new DR6 bits." When the payload is delievered, DR6.B0-B3 will be cleared and DR6.RTM will be set prior to merging in the new DR6 bits. Also note that bit 16 in the "new DR6 bits" is set to indicate that a debug exception (#DB) or a breakpoint exception (#BP) occurred inside an RTM region while advanced debugging of RTM transactional regions was enabled. Though the reverse of DR6.RTM, this makes the #DB payload field compatible with both the pending debug exceptions field under VMX and the exit qualification for #DB exceptions under VMX. Reported-by: Jim Mattson Suggested-by: Paolo Bonzini Signed-off-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 24 +++++++--------- arch/x86/kvm/x86.c | 69 ++++++++++++++++++++++++++++++++++------------ 2 files changed, 62 insertions(+), 31 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 0c7efb1de29d..91f4f5288150 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3305,21 +3305,17 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit *exit_qual = has_payload ? payload : vcpu->arch.cr2; return 1; } - } else { - /* - * FIXME: we must not write DR6 when L1 intercepts an - * L2 #DB exception. 
- */ - if (vmcs12->exception_bitmap & (1u << nr)) { - if (nr == DB_VECTOR) { - *exit_qual = vcpu->arch.dr6; - *exit_qual &= ~(DR6_FIXED_1 | DR6_BT); - *exit_qual ^= DR6_RTM; - } else { - *exit_qual = 0; + } else if (vmcs12->exception_bitmap & (1u << nr)) { + if (nr == DB_VECTOR) { + if (!has_payload) { + payload = vcpu->arch.dr6; + payload &= ~(DR6_FIXED_1 | DR6_BT); + payload ^= DR6_RTM; } - return 1; - } + *exit_qual = payload; + } else + *exit_qual = 0; + return 1; } return 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4e9536c335d6..bd4e402b2e79 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -410,6 +410,28 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu) return; switch (nr) { + case DB_VECTOR: + /* + * "Certain debug exceptions may clear bit 0-3. The + * remaining contents of the DR6 register are never + * cleared by the processor". + */ + vcpu->arch.dr6 &= ~DR_TRAP_BITS; + /* + * DR6.RTM is set by all #DB exceptions that don't clear it. + */ + vcpu->arch.dr6 |= DR6_RTM; + vcpu->arch.dr6 |= payload; + /* + * Bit 16 should be set in the payload whenever the #DB + * exception should clear DR6.RTM. This makes the payload + * compatible with the pending debug exceptions under VMX. + * Though not currently documented in the SDM, this also + * makes the payload compatible with the exit qualification + * for #DB exceptions under VMX. + */ + vcpu->arch.dr6 ^= payload & DR6_RTM; + break; case PF_VECTOR: vcpu->arch.cr2 = payload; break; @@ -464,11 +486,13 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, /* * In guest mode, payload delivery should be deferred, * so that the L1 hypervisor can intercept #PF before - * CR2 is modified. However, for ABI compatibility - * with KVM_GET_VCPU_EVENTS and KVM_SET_VCPU_EVENTS, - * we can't delay payload delivery unless userspace - * has enabled this functionality via the per-VM - * capability, KVM_CAP_EXCEPTION_PAYLOAD. + * CR2 is modified (or intercept #DB before DR6 is + * modified under nVMX). However, for ABI + * compatibility with KVM_GET_VCPU_EVENTS and + * KVM_SET_VCPU_EVENTS, we can't delay payload + * delivery unless userspace has enabled this + * functionality via the per-VM capability, + * KVM_CAP_EXCEPTION_PAYLOAD. */ if (!vcpu->kvm->arch.exception_payload_enabled || !is_guest_mode(vcpu)) @@ -518,6 +542,12 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) } EXPORT_SYMBOL_GPL(kvm_requeue_exception); +static void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, + unsigned long payload) +{ + kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false); +} + static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code, unsigned long payload) { @@ -6156,14 +6186,7 @@ static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r) kvm_run->exit_reason = KVM_EXIT_DEBUG; *r = EMULATE_USER_EXIT; } else { - /* - * "Certain debug exceptions may clear bit 0-3. The - * remaining contents of the DR6 register are never - * cleared by the processor". 
- */ - vcpu->arch.dr6 &= ~15; - vcpu->arch.dr6 |= DR6_BS | DR6_RTM; - kvm_queue_exception(vcpu, DB_VECTOR); + kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS); } } @@ -7102,10 +7125,22 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | X86_EFLAGS_RF); - if (vcpu->arch.exception.nr == DB_VECTOR && - (vcpu->arch.dr7 & DR7_GD)) { - vcpu->arch.dr7 &= ~DR7_GD; - kvm_update_dr7(vcpu); + if (vcpu->arch.exception.nr == DB_VECTOR) { + /* + * This code assumes that nSVM doesn't use + * check_nested_events(). If it does, the + * DR6/DR7 changes should happen before L1 + * gets a #VMEXIT for an intercepted #DB in + * L2. (Under VMX, on the other hand, the + * DR6/DR7 changes should not happen in the + * event of a VM-exit to L1 for an intercepted + * #DB in L2.) + */ + kvm_deliver_exception_payload(vcpu); + if (vcpu->arch.dr7 & DR7_GD) { + vcpu->arch.dr7 &= ~DR7_GD; + kvm_update_dr7(vcpu); + } } kvm_x86_ops->queue_exception(vcpu); From c4f55198c7c2b87909b166ffc2f6b68d9af6766c Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Tue, 16 Oct 2018 14:29:24 -0700 Subject: [PATCH 192/204] kvm: x86: Introduce KVM_CAP_EXCEPTION_PAYLOAD This is a per-VM capability which can be enabled by userspace so that the faulting linear address will be included with the information about a pending #PF in L2, and the "new DR6 bits" will be included with the information about a pending #DB in L2. With this capability enabled, the L1 hypervisor can now intercept #PF before CR2 is modified. Under VMX, the L1 hypervisor can now intercept #DB before DR6 and DR7 are modified. When userspace has enabled KVM_CAP_EXCEPTION_PAYLOAD, it should generally provide an appropriate payload when injecting a #PF or #DB exception via KVM_SET_VCPU_EVENTS. However, to support restoring old checkpoints, this payload is not required. Note that bit 16 of the "new DR6 bits" is set to indicate that a debug exception (#DB) or a breakpoint exception (#BP) occurred inside an RTM region while advanced debugging of RTM transactional regions was enabled. This is the reverse of DR6.RTM, which is cleared in this scenario. This capability also enables exception.pending in struct kvm_vcpu_events, which allows userspace to distinguish between pending and injected exceptions. Reported-by: Jim Mattson Suggested-by: Paolo Bonzini Signed-off-by: Jim Mattson Signed-off-by: Paolo Bonzini --- Documentation/virtual/kvm/api.txt | 27 ++++++++++++++++++++++++++- arch/x86/kvm/x86.c | 5 +++++ include/uapi/linux/kvm.h | 1 + 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index e900ac31501c..07e87a7c665d 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -4568,7 +4568,7 @@ hpage module parameter is not set to 1, -EINVAL is returned. While it is generally possible to create a huge page backed VM without this capability, the VM will not be able to run. -7.14 KVM_CAP_MSR_PLATFORM_INFO +7.15 KVM_CAP_MSR_PLATFORM_INFO Architectures: x86 Parameters: args[0] whether feature should be enabled or not @@ -4591,6 +4591,31 @@ state). Enabling this capability on a VM depends on the CPU having the necessary functionality and on the facility being enabled with a kvm-hv module parameter. 
+7.17 KVM_CAP_EXCEPTION_PAYLOAD + +Architectures: x86 +Parameters: args[0] whether feature should be enabled or not + +With this capability enabled, CR2 will not be modified prior to the +emulated VM-exit when L1 intercepts a #PF exception that occurs in +L2. Similarly, for kvm-intel only, DR6 will not be modified prior to +the emulated VM-exit when L1 intercepts a #DB exception that occurs in +L2. As a result, when KVM_GET_VCPU_EVENTS reports a pending #PF (or +#DB) exception for L2, exception.has_payload will be set and the +faulting address (or the new DR6 bits*) will be reported in the +exception_payload field. Similarly, when userspace injects a #PF (or +#DB) into L2 using KVM_SET_VCPU_EVENTS, it is expected to set +exception.has_payload and to put the faulting address (or the new DR6 +bits*) in the exception_payload field. + +This capability also enables exception.pending in struct +kvm_vcpu_events, which allows userspace to distinguish between pending +and injected exceptions. + + +* For the new DR6 bits, note that bit 16 is set iff the #DB exception + will clear DR6.RTM. + 8. Other capabilities. ---------------------- diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bd4e402b2e79..bdcb5babfb68 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3015,6 +3015,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_IMMEDIATE_EXIT: case KVM_CAP_GET_MSR_FEATURES: case KVM_CAP_MSR_PLATFORM_INFO: + case KVM_CAP_EXCEPTION_PAYLOAD: r = 1; break; case KVM_CAP_SYNC_REGS: @@ -4500,6 +4501,10 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; r = 0; break; + case KVM_CAP_EXCEPTION_PAYLOAD: + kvm->arch.exception_payload_enabled = cap->args[0]; + r = 0; + break; default: r = -EINVAL; break; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a2f2b8845502..cb6d44e1fe02 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -964,6 +964,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_HYPERV_SEND_IPI 161 #define KVM_CAP_COALESCED_PIO 162 #define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163 +#define KVM_CAP_EXCEPTION_PAYLOAD 164 #ifdef KVM_CAP_IRQ_ROUTING From 43ce76ce731ff7f9cff51124fee738018c18a864 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Wed, 17 Oct 2018 16:46:57 +0200 Subject: [PATCH 193/204] KVM/x86: Use 32bit xor to clear registers in svm.c x86_64 zero-extends 32bit xor operation to a full 64bit register. Also add a comment and remove unnecessary instruction suffix in vmx.c Signed-off-by: Uros Bizjak Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 28 +++++++++++++--------------- arch/x86/kvm/vmx.c | 6 +++++- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 1086bebaeea3..f416f5c7f2ae 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -5637,26 +5637,24 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) "mov %%r13, %c[r13](%[svm]) \n\t" "mov %%r14, %c[r14](%[svm]) \n\t" "mov %%r15, %c[r15](%[svm]) \n\t" -#endif /* * Clear host registers marked as clobbered to prevent * speculative use. 
*/ - "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t" - "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t" - "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t" - "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t" - "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t" -#ifdef CONFIG_X86_64 - "xor %%r8, %%r8 \n\t" - "xor %%r9, %%r9 \n\t" - "xor %%r10, %%r10 \n\t" - "xor %%r11, %%r11 \n\t" - "xor %%r12, %%r12 \n\t" - "xor %%r13, %%r13 \n\t" - "xor %%r14, %%r14 \n\t" - "xor %%r15, %%r15 \n\t" + "xor %%r8d, %%r8d \n\t" + "xor %%r9d, %%r9d \n\t" + "xor %%r10d, %%r10d \n\t" + "xor %%r11d, %%r11d \n\t" + "xor %%r12d, %%r12d \n\t" + "xor %%r13d, %%r13d \n\t" + "xor %%r14d, %%r14d \n\t" + "xor %%r15d, %%r15d \n\t" #endif + "xor %%ebx, %%ebx \n\t" + "xor %%ecx, %%ecx \n\t" + "xor %%edx, %%edx \n\t" + "xor %%esi, %%esi \n\t" + "xor %%edi, %%edi \n\t" "pop %%" _ASM_BP : : [svm]"a"(svm), diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 91f4f5288150..3677609145a4 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -62,7 +62,7 @@ #define __ex(x) __kvm_handle_fault_on_reboot(x) #define __ex_clear(x, reg) \ - ____kvm_handle_fault_on_reboot(x, "xorl " reg " , " reg) + ____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg) MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); @@ -11285,6 +11285,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "mov %%r13, %c[r13](%0) \n\t" "mov %%r14, %c[r14](%0) \n\t" "mov %%r15, %c[r15](%0) \n\t" + /* + * Clear host registers marked as clobbered to prevent + * speculative use. + */ "xor %%r8d, %%r8d \n\t" "xor %%r9d, %%r9d \n\t" "xor %%r10d, %%r10d \n\t" From 1e58e5e59148916fa43444a406335a990783fb78 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 17 Oct 2018 00:55:22 +0200 Subject: [PATCH 194/204] KVM: VMX: enable nested virtualization by default With live migration support and finally a good solution for exception event injection, nested VMX should be ready for having a stable userspace ABI. The results of syzkaller fuzzing are not perfect but not horrible either (and might be partially due to running on GCE, so that effectively we're testing three-level nesting on a fork of upstream KVM!). Enabling it by default seems like a nice way to conclude the 4.20 pull request. :) Unfortunately, enabling nested SVM in 2009 (commit 4b6e4dca701) was a bit premature. However, until live migration support is in place we can reasonably expect that it does not offer much in terms of ABI guarantees. Therefore we are still in time to break things and conform as much as possible to the interface used for VMX. Suggested-by: Jim Mattson Suggested-by: Liran Alon Reviewed-by: Liran Alon Celebrated-by: Liran Alon Celebrated-by: Wanpeng Li Celebrated-by: Wincy Van Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 3677609145a4..abeeb45d1c33 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -108,7 +108,7 @@ module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); * VMX and be a hypervisor for its own guests. If nested=0, guests may not * use VMX instructions. 
*/ -static bool __read_mostly nested = 0; +static bool __read_mostly nested = 1; module_param(nested, bool, S_IRUGO); static bool __read_mostly nested_early_check = 0; From da5a3ce66b8bb51b0ea8a89f42aac153903f90fb Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 17 Oct 2018 17:42:10 +0100 Subject: [PATCH 195/204] KVM: arm64: Fix caching of host MDCR_EL2 value At boot time, KVM stashes the host MDCR_EL2 value, but only does this when the kernel is not running in hyp mode (i.e. is non-VHE). In these cases, the stashed value of MDCR_EL2.HPMN happens to be zero, which can lead to CONSTRAINED UNPREDICTABLE behaviour. Since we use this value to derive the MDCR_EL2 value when switching to/from a guest, after a guest has been run, the performance counters do not behave as expected. This has been observed to result in accesses via PMXEVTYPER_EL0 and PMXEVCNTR_EL0 not affecting the relevant counters, resulting in events not being counted. In these cases, only the fixed-purpose cycle counter appears to work as expected. Fix this by always stashing the host MDCR_EL2 value, regardless of VHE. Cc: Christoffer Dall Cc: James Morse Cc: Will Deacon Cc: stable@vger.kernel.org Fixes: 1e947bad0b63b351 ("arm64: KVM: Skip HYP setup when already running in HYP") Tested-by: Robin Murphy Signed-off-by: Mark Rutland Signed-off-by: Marc Zyngier --- virt/kvm/arm/arm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 4ce99bb223bc..4c5ff06b18f9 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -1296,8 +1296,6 @@ static void cpu_init_hyp_mode(void *dummy) __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr); __cpu_init_stage2(); - - kvm_arm_init_debug(); } static void cpu_hyp_reset(void) @@ -1315,6 +1313,8 @@ static void cpu_hyp_reinit(void) else cpu_init_hyp_mode(NULL); + kvm_arm_init_debug(); + if (vgic_present) kvm_vgic_init_cpu_hardware(); } From 375bdd3b5d4f7cf146f0df1488b4671b141dd799 Mon Sep 17 00:00:00 2001 From: Dongjiu Geng Date: Sat, 13 Oct 2018 00:12:48 +0800 Subject: [PATCH 196/204] arm/arm64: KVM: Rename function kvm_arch_dev_ioctl_check_extension() Rename kvm_arch_dev_ioctl_check_extension() to kvm_arch_vm_ioctl_check_extension(), because it does not have any relationship with a device. Renaming this function can make code readable. 
Cc: James Morse Reviewed-by: Suzuki K Poulose Signed-off-by: Dongjiu Geng Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 2 +- arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/kvm/reset.c | 4 ++-- virt/kvm/arm/arm.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index b45af481ccf7..5ca5d9af0c26 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -273,7 +273,7 @@ static inline void __cpu_init_stage2(void) kvm_call_hyp(__init_stage2_translation); } -static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext) +static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) { return 0; } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 376a5b695467..f84052f306af 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -53,7 +53,7 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); -int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext); +int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext); void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start); struct kvm_arch { diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index aa806d582552..337d2fbc2f06 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -59,12 +59,12 @@ static bool cpu_has_32bit_el1(void) } /** - * kvm_arch_dev_ioctl_check_extension + * kvm_arch_vm_ioctl_check_extension * * We currently assume that the number of HW registers is uniform * across all CPUs (see cpuinfo_sanity_check). */ -int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext) +int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) { int r; diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 4c5ff06b18f9..7c9d7b51ce89 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -241,7 +241,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = 1; break; default: - r = kvm_arch_dev_ioctl_check_extension(kvm, ext); + r = kvm_arch_vm_ioctl_check_extension(kvm, ext); break; } return r; From 58bf437ff64eac8aca606e42d7e4623e40b61fa1 Mon Sep 17 00:00:00 2001 From: Dongjiu Geng Date: Sat, 13 Oct 2018 00:12:49 +0800 Subject: [PATCH 197/204] arm/arm64: KVM: Enable 32 bits kvm vcpu events support The commit 539aee0edb9f ("KVM: arm64: Share the parts of get/set events useful to 32bit") shares the get/set events helper for arm64 and arm32, but forgot to share the cap extension code. 
User space will check whether KVM supports vcpu events by checking the KVM_CAP_VCPU_EVENTS extension Acked-by: James Morse Reviewed-by : Suzuki K Poulose Signed-off-by: Dongjiu Geng Signed-off-by: Marc Zyngier --- arch/arm64/kvm/reset.c | 1 - virt/kvm/arm/arm.c | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 337d2fbc2f06..b72a3dd56204 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -86,7 +86,6 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; case KVM_CAP_SET_GUEST_DEBUG: case KVM_CAP_VCPU_ATTRIBUTES: - case KVM_CAP_VCPU_EVENTS: r = 1; break; case KVM_CAP_ARM_VM_IPA_SIZE: diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 7c9d7b51ce89..11b98b2b0486 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -213,6 +213,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_READONLY_MEM: case KVM_CAP_MP_STATE: case KVM_CAP_IMMEDIATE_EXIT: + case KVM_CAP_VCPU_EVENTS: r = 1; break; case KVM_CAP_ARM_SET_DEVICE_ADDR: From 8d9fcacff9dc4445de4ced2dbe63b792174206d5 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 19 Oct 2018 20:44:04 +1100 Subject: [PATCH 198/204] KVM: PPC: Book3S HV: Don't use streamlined entry path on early POWER9 chips This disables the use of the streamlined entry path for radix guests on early POWER9 chips that need the workaround added in commit a25bd72badfa ("powerpc/mm/radix: Workaround prefetch issue with KVM", 2017-07-24), because the streamlined entry path does not include that workaround. This also means that we can't do nested HV-KVM on those chips. Since the chips that need that workaround are the same ones that can't run both radix and HPT guests at the same time on different threads of a core, we use the existing 'no_mixing_hpt_and_radix' variable that identifies those chips to identify when we can't use the new guest entry path, and when we can't do nested virtualization. Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 788bc61bd08c..bf8def2159c3 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4174,7 +4174,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; do { - if (kvm->arch.threads_indep && kvm_is_radix(kvm)) + /* + * The early POWER9 chips that can't mix radix and HPT threads + * on the same core also need the workaround for the problem + * where the TLB would prefetch entries in the guest exit path + * for radix guests using the guest PIDR value and LPID 0. + * The workaround is in the old path (kvmppc_run_vcpu()) + * but not the new path (kvmhv_run_single_vcpu()). 
+ */ + if (kvm->arch.threads_indep && kvm_is_radix(kvm) && + !no_mixing_hpt_and_radix) r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0, vcpu->arch.vcore->lpcr); else @@ -5196,7 +5205,7 @@ static int kvmhv_enable_nested(struct kvm *kvm) { if (!nested) return -EPERM; - if (!cpu_has_feature(CPU_FTR_ARCH_300)) + if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix) return -ENODEV; /* kvm == NULL means the caller is testing if the capability exists */ From e4e11cc0f81ee7be17d6f6fb96128a6d51c0e838 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Wed, 17 Oct 2018 20:21:16 +0200 Subject: [PATCH 199/204] KVM: arm64: Safety check PSTATE when entering guest and handle IL This commit adds a paranoid check when entering the guest to make sure we don't attempt running guest code in an equally or more privilged mode than the hypervisor. We also catch other accidental programming of the SPSR_EL2 which results in an illegal exception return and report this safely back to the user. Signed-off-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_asm.h | 1 + arch/arm64/include/asm/ptrace.h | 3 +++ arch/arm64/kvm/handle_exit.c | 7 +++++++ arch/arm64/kvm/hyp/hyp-entry.S | 16 +++++++++++++++- arch/arm64/kvm/hyp/sysreg-sr.c | 19 ++++++++++++++++++- 5 files changed, 44 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 0b53c72e7591..aea01a09eb94 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -30,6 +30,7 @@ #define ARM_EXCEPTION_IRQ 0 #define ARM_EXCEPTION_EL1_SERROR 1 #define ARM_EXCEPTION_TRAP 2 +#define ARM_EXCEPTION_IL 3 /* The hyp-stub will return this for any kvm_call_hyp() call */ #define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 177b851ca6d9..ff35ac1258eb 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -25,6 +25,9 @@ #define CurrentEL_EL1 (1 << 2) #define CurrentEL_EL2 (2 << 2) +/* Additional SPSR bits not exposed in the UABI */ +#define PSR_IL_BIT (1 << 20) + /* AArch32-specific ptrace requests */ #define COMPAT_PTRACE_GETREGS 12 #define COMPAT_PTRACE_SETREGS 13 diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index e5e741bfffe1..35a81bebd02b 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -284,6 +284,13 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, */ run->exit_reason = KVM_EXIT_FAIL_ENTRY; return 0; + case ARM_EXCEPTION_IL: + /* + * We attempted an illegal exception return. Guest state must + * have been corrupted somehow. Give up. + */ + run->exit_reason = KVM_EXIT_FAIL_ENTRY; + return -EINVAL; default: kvm_pr_unimpl("Unsupported exception type: %d", exception_index); diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 24b4fbafe3e4..b1f14f736962 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -162,6 +162,20 @@ el1_error: mov x0, #ARM_EXCEPTION_EL1_SERROR b __guest_exit +el2_sync: + /* Check for illegal exception return, otherwise panic */ + mrs x0, spsr_el2 + + /* if this was something else, then panic! 
*/ + tst x0, #PSR_IL_BIT + b.eq __hyp_panic + + /* Let's attempt a recovery from the illegal exception return */ + get_vcpu_ptr x1, x0 + mov x0, #ARM_EXCEPTION_IL + b __guest_exit + + el2_error: ldp x0, x1, [sp], #16 @@ -240,7 +254,7 @@ ENTRY(__kvm_hyp_vector) invalid_vect el2t_fiq_invalid // FIQ EL2t invalid_vect el2t_error_invalid // Error EL2t - invalid_vect el2h_sync_invalid // Synchronous EL2h + valid_vect el2_sync // Synchronous EL2h invalid_vect el2h_irq_invalid // IRQ EL2h invalid_vect el2h_fiq_invalid // FIQ EL2h valid_vect el2_error // Error EL2h diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 9ce223944983..8dc285318204 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -152,8 +152,25 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) static void __hyp_text __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt) { + u64 pstate = ctxt->gp_regs.regs.pstate; + u64 mode = pstate & PSR_AA32_MODE_MASK; + + /* + * Safety check to ensure we're setting the CPU up to enter the guest + * in a less privileged mode. + * + * If we are attempting a return to EL2 or higher in AArch64 state, + * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that + * we'll take an illegal exception state exception immediately after + * the ERET to the guest. Attempts to return to AArch32 Hyp will + * result in an illegal exception return because EL2's execution state + * is determined by SCR_EL3.RW. + */ + if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t) + pstate = PSR_MODE_EL2h | PSR_IL_BIT; + write_sysreg_el2(ctxt->gp_regs.regs.pc, elr); - write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr); + write_sysreg_el2(pstate, spsr); if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2); From f15ac811e80b857a443476de60ce70d3e6049ae5 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Fri, 19 Oct 2018 16:38:16 +0200 Subject: [PATCH 200/204] selftests/kvm: add missing executables to .gitignore Fixes: 18178ff86217 ("KVM: selftests: add Enlightened VMCS test") Signed-off-by: Anders Roxell Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 552b0d9a49f4..6210ba41c29e 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -1,4 +1,5 @@ /x86_64/cr4_cpuid_sync_test +/x86_64/evmcs_test /x86_64/platform_info_test /x86_64/set_sregs_test /x86_64/sync_regs_test From cbe3f898d159f4a6ed12121672c823b780df32d3 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 19 Oct 2018 16:16:03 +0200 Subject: [PATCH 201/204] x86/kvm/nVMX: tweak shadow fields It seems we have some leftovers from times when 'unrestricted guest' wasn't exposed to L1. Stop shadowing GUEST_CS_{BASE,LIMIT,SELECTOR} and GUEST_ES_BASE, shadow GUEST_SS_AR_BYTES as it was found that some hypervisors (e.g. Hyper-V without Enlightened VMCS) access it pretty often.
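For readers following the shadow-field lists: vmx_shadow_fields.h is consumed as an X-macro list that vmx.c includes to build its arrays of shadowed VMCS field encodings, so adding or dropping a SHADOW_FIELD_RW() line is all it takes to change which fields L1 can access without a vmexit. A minimal standalone sketch of that pattern is below (illustrative only, not the kernel's list; the two encodings are reproduced just for the example):

    /* Simplified X-macro sketch of the SHADOW_FIELD_RW list pattern. */
    #include <stdio.h>

    #define GUEST_CS_AR_BYTES 0x4816UL
    #define GUEST_SS_AR_BYTES 0x4818UL

    #define SHADOW_FIELD_RW(x) x,
    static const unsigned long shadow_read_write_fields[] = {
            SHADOW_FIELD_RW(GUEST_CS_AR_BYTES)
            SHADOW_FIELD_RW(GUEST_SS_AR_BYTES)  /* newly shadowed by this patch */
    };
    #undef SHADOW_FIELD_RW

    int main(void)
    {
            /* L1 VMREAD/VMWRITE of a shadowed encoding is served from the
             * shadow VMCS without a vmexit; unshadowed fields trap to L0. */
            for (unsigned long i = 0;
                 i < sizeof(shadow_read_write_fields) / sizeof(shadow_read_write_fields[0]);
                 i++)
                    printf("shadowed encoding: 0x%lx\n", shadow_read_write_fields[i]);
            return 0;
    }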
Suggested-by: Paolo Bonzini Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 10 +++++----- arch/x86/kvm/vmx_shadow_fields.h | 5 +---- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index abeeb45d1c33..641a65b30685 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12715,6 +12715,7 @@ static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); + vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); @@ -12722,6 +12723,7 @@ static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); + vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); @@ -12731,12 +12733,13 @@ static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); - vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); + vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); + vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); @@ -12838,11 +12841,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, */ if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { - vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); - vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); - vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); - vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); + vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); } if (vmx->nested.nested_run_pending && diff --git a/arch/x86/kvm/vmx_shadow_fields.h b/arch/x86/kvm/vmx_shadow_fields.h index cd0c75f6d037..132432f375c2 100644 --- a/arch/x86/kvm/vmx_shadow_fields.h +++ b/arch/x86/kvm/vmx_shadow_fields.h @@ -28,7 +28,6 @@ */ /* 16-bits */ -SHADOW_FIELD_RW(GUEST_CS_SELECTOR) SHADOW_FIELD_RW(GUEST_INTR_STATUS) SHADOW_FIELD_RW(GUEST_PML_INDEX) SHADOW_FIELD_RW(HOST_FS_SELECTOR) @@ -47,8 +46,8 @@ SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE) SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD) SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN) SHADOW_FIELD_RW(TPR_THRESHOLD) -SHADOW_FIELD_RW(GUEST_CS_LIMIT) SHADOW_FIELD_RW(GUEST_CS_AR_BYTES) +SHADOW_FIELD_RW(GUEST_SS_AR_BYTES) SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO) 
SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE) @@ -61,8 +60,6 @@ SHADOW_FIELD_RW(GUEST_CR0) SHADOW_FIELD_RW(GUEST_CR3) SHADOW_FIELD_RW(GUEST_CR4) SHADOW_FIELD_RW(GUEST_RFLAGS) -SHADOW_FIELD_RW(GUEST_CS_BASE) -SHADOW_FIELD_RW(GUEST_ES_BASE) SHADOW_FIELD_RW(CR0_GUEST_HOST_MASK) SHADOW_FIELD_RW(CR0_READ_SHADOW) SHADOW_FIELD_RW(CR4_READ_SHADOW) From 6e301a8e56e429d6b01d83d427a9e54fdbb0fa60 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 15 Oct 2018 21:08:41 +1100 Subject: [PATCH 202/204] KVM: PPC: Optimize clearing TCEs for sparse tables The powernv platform maintains 2 TCE tables for VFIO - a hardware TCE table and a table with userspace addresses. These tables are radix trees; we allocate indirect levels when they are written to. Since the memory allocation is problematic in real mode, we have 2 accessors to the entries: - for virtual mode: it allocates the memory and it is always expected to return non-NULL; - for real mode: it does not allocate and can return NULL. Also, DMA windows can span up to 55 bits of the address space and since we never have this much RAM, such windows are sparse. However, currently the SPAPR TCE IOMMU driver walks through all TCEs to unpin DMA memory. Since we maintain a userspace address table for VFIO which is a mirror of the hardware table, we can use it to know which parts of the DMA window have not been mapped and skip them, which is what this patch does. The bare metal systems do not have this problem as they use a bypass mode of a PHB which maps RAM directly. This helps a lot with sparse DMA windows, reducing the shutdown time from about 3 minutes per 1 billion TCEs to a few seconds for a 32GB sparse guest. Just skipping the last level seems to be good enough. As the non-allocating accessor is now used in virtual mode as well, rename it from IOMMU_TABLE_USERSPACE_ENTRY_RM (real mode) to _RO (read only).
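To make the skip concrete, here is a small standalone sketch of the trick (illustrative only: LEVEL_SIZE, NUM_ENTRIES and the blocks[] array are invented for the example; the kernel instead consults the it_userspace mirror through IOMMU_TABLE_USERSPACE_ENTRY_RO() and uses it_level_size). When the lookup shows a whole last-level block was never allocated, the loop index jumps to the end of that block instead of visiting every entry; the entry |= level_size - 1 alignment works because the level size is a power of two:

    #include <stdio.h>
    #include <stdlib.h>

    #define LEVEL_SIZE   8UL    /* entries per last-level block (example value) */
    #define NUM_ENTRIES  64UL   /* size of the sparse window (example value) */

    int main(void)
    {
            /* one pointer per last-level block; NULL means "never allocated" */
            unsigned long *blocks[NUM_ENTRIES / LEVEL_SIZE] = { NULL };
            unsigned long visited = 0;

            blocks[3] = calloc(LEVEL_SIZE, sizeof(unsigned long)); /* one mapped block */

            for (unsigned long entry = 0; entry < NUM_ENTRIES; ++entry) {
                    if (!blocks[entry / LEVEL_SIZE]) {
                            /* align to the block end; LEVEL_SIZE is a power of two */
                            entry |= LEVEL_SIZE - 1;
                            continue;
                    }
                    visited++;  /* the real code would clear this TCE */
            }

            printf("entries visited: %lu of %lu\n", visited, NUM_ENTRIES);
            free(blocks[3]);
            return 0;
    }

With a mostly-empty window this visits only the entries of the single allocated block (8 of 64 here), which is the same effect the patch achieves for huge sparse DMA windows.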
Signed-off-by: Alexey Kardashevskiy Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/iommu.h | 2 +- arch/powerpc/kvm/book3s_64_vio.c | 5 ++--- arch/powerpc/kvm/book3s_64_vio_hv.c | 6 +++--- drivers/vfio/vfio_iommu_spapr_tce.c | 23 +++++++++++++++++++++-- 4 files changed, 27 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 3d4b88cb8599..35db0cbc9222 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -126,7 +126,7 @@ struct iommu_table { int it_nid; }; -#define IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry) \ +#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \ ((tbl)->it_ops->useraddrptr((tbl), (entry), false)) #define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \ ((tbl)->it_ops->useraddrptr((tbl), (entry), true)) diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index c0c64d11cc71..62a8d03ba7e9 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -410,11 +410,10 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm, { struct mm_iommu_table_group_mem_t *mem = NULL; const unsigned long pgsize = 1ULL << tbl->it_page_shift; - __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); if (!pua) - /* it_userspace allocation might be delayed */ - return H_TOO_HARD; + return H_SUCCESS; mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize); if (!mem) diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index ec99363fdf54..2206bc729b9a 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -214,7 +214,7 @@ static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl, if (!ret && ((*direction == DMA_FROM_DEVICE) || (*direction == DMA_BIDIRECTIONAL))) { - __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry); + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); /* * kvmppc_rm_tce_iommu_do_map() updates the UA cache after * calling this so we still get here a valid UA. 
@@ -240,7 +240,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm, { struct mm_iommu_table_group_mem_t *mem = NULL; const unsigned long pgsize = 1ULL << tbl->it_page_shift; - __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry); + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); if (!pua) /* it_userspace allocation might be delayed */ @@ -304,7 +304,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, { long ret; unsigned long hpa = 0; - __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry); + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); struct mm_iommu_table_group_mem_t *mem; if (!pua) diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 96721b154454..b30926e11d87 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -444,7 +444,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container, struct mm_iommu_table_group_mem_t *mem = NULL; int ret; unsigned long hpa = 0; - __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); if (!pua) return; @@ -467,8 +467,27 @@ static int tce_iommu_clear(struct tce_container *container, unsigned long oldhpa; long ret; enum dma_data_direction direction; + unsigned long lastentry = entry + pages; + + for ( ; entry < lastentry; ++entry) { + if (tbl->it_indirect_levels && tbl->it_userspace) { + /* + * For multilevel tables, we can take a shortcut here + * and skip some TCEs as we know that the userspace + * addresses cache is a mirror of the real TCE table + * and if it is missing some indirect levels, then + * the hardware table does not have them allocated + * either and therefore does not require updating. + */ + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, + entry); + if (!pua) { + /* align to level_size which is power of two */ + entry |= tbl->it_level_size - 1; + continue; + } + } - for ( ; pages; --pages, ++entry) { cond_resched(); direction = DMA_NONE; From f9dcf08e2017cad12d771cdd862c8c314b885615 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= Date: Tue, 23 Oct 2018 16:31:38 +0200 Subject: [PATCH 203/204] Revert "kvm: x86: optimize dr6 restore" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 0e0a53c551317654e2d7885fdfd23299fee99b6b. As Christian Ehrhardt noted: The most common case is that vcpu->arch.dr6 and the host's %dr6 value are not related at all because ->switch_db_regs is zero. To do this all correctly, we must handle the case where the guest leaves an arbitrary unused value in vcpu->arch.dr6 before disabling breakpoints again. However, this means that vcpu->arch.dr6 is not suitable to detect the need for a %dr6 clear. Signed-off-by: Radim Krčmář --- arch/x86/kvm/x86.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bdcb5babfb68..66d66d77caee 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3271,16 +3271,11 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) kvm_x86_ops->vcpu_put(vcpu); vcpu->arch.last_host_tsc = rdtsc(); /* - * Here dr6 is either zero or, if the guest has run and userspace - * has not set any breakpoints or watchpoints, it can be set to - * the guest dr6 (stored in vcpu->arch.dr6). do_debug expects dr6 - * to be cleared after it runs, so clear the host register. 
However, - * MOV to DR can be expensive when running nested, omit it if - * vcpu->arch.dr6 is already zero: in that case, the host dr6 cannot - * currently be nonzero. + * If userspace has set any breakpoints or watchpoints, dr6 is restored + * on every vmexit, but if not, we might have a stale dr6 from the + * guest. do_debug expects dr6 to be cleared after it runs, do the same. */ - if (vcpu->arch.dr6) - set_debugreg(0, 6); + set_debugreg(0, 6); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, From 22a7cdcae6a4a3c8974899e62851d270956f58ce Mon Sep 17 00:00:00 2001 From: KarimAllah Ahmed Date: Sat, 20 Oct 2018 23:42:59 +0200 Subject: [PATCH 204/204] KVM/nVMX: Do not validate that posted_intr_desc_addr is page aligned MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The spec only requires the posted interrupt descriptor address to be 64-byte aligned (i.e. bits[0:5] == 0). Using page_address_valid also forces the address to be page aligned. Only validate that the address does not cross the maximum physical address without enforcing a page alignment. Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: H. Peter Anvin Cc: x86@kernel.org Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Fixes: 6de84e581c0 ("nVMX x86: check posted-interrupt descriptor addresss on vmentry of L2") Signed-off-by: KarimAllah Ahmed Reviewed-by: Jim Mattson Reviewed-by: Krish Sadhukhan Signed-off-by: Radim Krčmář --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 641a65b30685..ccc6a01eb4f4 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12206,7 +12206,7 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, !nested_exit_intr_ack_set(vcpu) || (vmcs12->posted_intr_nv & 0xff00) || (vmcs12->posted_intr_desc_addr & 0x3f) || - (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr)))) + (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))) return -EINVAL; /* tpr shadow is needed by all apicv features. */
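As a quick sanity sketch of the relaxed validation (illustrative only; pi_desc_addr_ok() and the MAXPHYADDR width used here are invented for the example, not the kernel helper), the check reduces to two conditions: 64-byte alignment and fitting within the guest's physical address width:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Descriptor must be 64-byte aligned (bits [5:0] clear) and addressable
     * within MAXPHYADDR; page alignment is deliberately not required. */
    static bool pi_desc_addr_ok(uint64_t addr, unsigned int maxphyaddr)
    {
            if (addr & 0x3fULL)
                    return false;
            if (addr >> maxphyaddr)
                    return false;
            return true;
    }

    int main(void)
    {
            assert(pi_desc_addr_ok(0x1000c0ULL, 46));   /* 64-byte but not page aligned: accepted */
            assert(!pi_desc_addr_ok(0x100001ULL, 46));  /* not 64-byte aligned: rejected */
            assert(!pi_desc_addr_ok(1ULL << 50, 46));   /* beyond MAXPHYADDR: rejected */
            return 0;
    }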