- Yet another fix for non-CPU accesses to the memory backing
   the VGICv3 subsystem
 
 - A set of fixes for the selftest checking for the S1PTW
   behaviour after the fix that went in earlier in the cycle
 -----BEGIN PGP SIGNATURE-----
 
 iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmPeZxwUHHBib256aW5p
 QHJlZGhhdC5jb20ACgkQv/vSX3jHroMh7Af/W3CX09KfdKYidbnF4Vx5rkaBaseP
 ox77Qco8j6DcC3fazs5TZtiN7pOSf6488VmbS43HkfZUoNAWmXn8tvwvX+a4pNE9
 yYjXQYRzOjyShPJIsBehlocn3F5G1KvZ8rVl5deVjBRtY5Vd1NzliFPL/EVZiVBk
 l6IHi0rawUTaSGJ9ZLBglQ9wEuAGv1R0SkjhwlXQ6AbupKB1n3tvgOSlKWbwP9fZ
 qJN0mOy38lp2YNRtsoZPiyf9AxGXd9XX6twQ4bhDr9h540HrbzzFh8mWu536X7mg
 dhWwggHidzN8Jzih2TnPByqorGIiJ4ab1iU6udp2X0oiaacfoIFzh8vrlA==
 =7/HY
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "ARM64:

   - Yet another fix for non-CPU accesses to the memory backing the
     VGICv3 subsystem

   - A set of fixes for the selftest checking for the S1PTW behaviour
     after the fix that went in earlier in the cycle"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: selftests: aarch64: Test read-only PT memory regions
  KVM: selftests: aarch64: Fix check of dirty log PT write
  KVM: selftests: aarch64: Do not default to dirty PTE pages on all S1PTWs
  KVM: selftests: aarch64: Relax userfaultfd read vs. write checks
  KVM: arm64: Allow no running vcpu on saving vgic3 pending table
  KVM: arm64: Allow no running vcpu on restoring vgic3 LPI pending status
  KVM: arm64: Add helper vgic_write_guest_lock()
Linus Torvalds 2023-02-04 11:21:27 -08:00
commit c00f4ddde0
6 changed files with 132 additions and 98 deletions
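
The two "Allow no running vcpu" changes hinge on the kvm_arch_allow_write_without_running_vcpu() hook in KVM's generic dirty-tracking path: when the dirty ring is in use and a write to guest memory is triggered by a device ioctl rather than by a running vCPU, there is no per-vCPU ring to log the page into. A paraphrased sketch of that path, assuming the dirty ring is enabled (not the exact upstream code; push_to_dirty_ring() is a stand-in for the real per-vCPU ring helper):

/*
 * Paraphrased sketch of the generic mark-dirty path -- not the exact
 * upstream code.  push_to_dirty_ring() is a placeholder for the real
 * per-vCPU dirty-ring helper.
 */
static void mark_dirty_sketch(struct kvm *kvm, struct kvm_memory_slot *slot,
                              gfn_t gfn)
{
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

        if (vcpu) {
                /* Normal case: the running vCPU logs the page in its dirty ring. */
                push_to_dirty_ring(vcpu, slot, gfn);
        } else if (kvm_arch_allow_write_without_running_vcpu(kvm)) {
                /* vgic/ITS table save/restore: log into the backup bitmap instead. */
                set_bit_le(gfn - slot->base_gfn, slot->dirty_bitmap);
        }
        /* Otherwise the write goes unlogged, and upstream warns about it. */
}

The vgic changes below make that hook return true for the whole duration of any vgic/ITS table write, not just the ITS table save, so those writes land in the backup bitmap.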


@@ -8070,9 +8070,13 @@ considering the state as complete. VMM needs to ensure that the dirty
state is final and avoid missing dirty pages from another ioctl ordered
after the bitmap collection.
NOTE: One example of using the backup bitmap is saving arm64 vgic/its
tables through KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} command on
KVM device "kvm-arm-vgic-its" when dirty ring is enabled.
NOTE: Multiple examples of using the backup bitmap: (1) save vgic/its
tables through command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_SAVE_TABLES} on
KVM device "kvm-arm-vgic-its". (2) restore vgic/its tables through
command KVM_DEV_ARM_{VGIC_GRP_CTRL, ITS_RESTORE_TABLES} on KVM device
"kvm-arm-vgic-its". VGICv3 LPI pending status is restored. (3) save
vgic3 pending table through KVM_DEV_ARM_VGIC_{GRP_CTRL, SAVE_PENDING_TABLES}
command on KVM device "kvm-arm-vgic-v3".
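
A minimal userspace sketch of cases (1) and (3), on an arm64 build where <linux/kvm.h> provides the arm64 device attributes; its_fd and vgic_fd are assumed to come from KVM_CREATE_DEVICE for the "kvm-arm-vgic-its" and "kvm-arm-vgic-v3" devices, the dirty ring itself (KVM_CAP_DIRTY_LOG_RING[_ACQ_REL]) is assumed to be enabled, and error handling is omitted:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical helpers for illustration only. */
static void enable_backup_bitmap(int vm_fd)
{
        /* Done once at VM-setup time, before memslots are created. */
        struct kvm_enable_cap cap = { .cap = KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP };

        ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

static void save_vgic_tables(int its_fd, int vgic_fd)
{
        /* (1) Save the ITS tables into guest memory ("kvm-arm-vgic-its" device). */
        struct kvm_device_attr its_save = {
                .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
                .attr  = KVM_DEV_ARM_ITS_SAVE_TABLES,
        };
        /* (3) Save the GICv3 LPI pending table ("kvm-arm-vgic-v3" device). */
        struct kvm_device_attr pend_save = {
                .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
                .attr  = KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES,
        };

        ioctl(its_fd, KVM_SET_DEVICE_ATTR, &its_save);
        ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &pend_save);

        /* Pages dirtied by these writes are then collected via KVM_GET_DIRTY_LOG. */
}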
8.30 KVM_CAP_XEN_HVM
--------------------


@@ -2187,7 +2187,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
ite->collection->collection_id;
val = cpu_to_le64(val);
return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);
}
/**
@@ -2339,7 +2339,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
(itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
(dev->num_eventid_bits - 1));
val = cpu_to_le64(val);
return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
return vgic_write_guest_lock(kvm, ptr, &val, dte_esz);
}
/**
@@ -2526,7 +2526,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
collection->collection_id);
val = cpu_to_le64(val);
return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}
/*
@@ -2607,7 +2607,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
*/
val = 0;
BUG_ON(cte_esz > sizeof(val));
ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
return ret;
}
@@ -2743,7 +2743,6 @@ static int vgic_its_has_attr(struct kvm_device *dev,
static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
{
const struct vgic_its_abi *abi = vgic_its_get_abi(its);
struct vgic_dist *dist = &kvm->arch.vgic;
int ret = 0;
if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
@@ -2763,9 +2762,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
vgic_its_reset(kvm, its);
break;
case KVM_DEV_ARM_ITS_SAVE_TABLES:
dist->save_its_tables_in_progress = true;
ret = abi->save_tables(its);
dist->save_its_tables_in_progress = false;
break;
case KVM_DEV_ARM_ITS_RESTORE_TABLES:
ret = abi->restore_tables(its);
@@ -2792,7 +2789,7 @@ bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
return dist->save_its_tables_in_progress;
return dist->table_write_in_progress;
}
static int vgic_its_set_attr(struct kvm_device *dev,
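
Note that the KVM_DEV_ARM_ITS_SAVE_TABLES case above no longer toggles the in-progress flag directly: the new vgic_write_guest_lock() helper (added further down) sets and clears table_write_in_progress around every vgic/ITS table write, which is what lets kvm_arch_allow_write_without_running_vcpu() also cover the ITS restore path and the GICv3 pending-table save rather than just the ITS save.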


@@ -339,7 +339,7 @@ retry:
if (status) {
/* clear consumed data */
val &= ~(1 << bit_nr);
ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
if (ret)
return ret;
}
@@ -434,7 +434,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
else
val &= ~(1 << bit_nr);
ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
if (ret)
goto out;
}


@@ -6,6 +6,7 @@
#define __KVM_ARM_VGIC_NEW_H__
#include <linux/irqchip/arm-gic-common.h>
#include <asm/kvm_mmu.h>
#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
#define IMPLEMENTER_ARM 0x43b
@@ -131,6 +132,19 @@ static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
return vgic_irq_get_lr_count(irq) > 1;
}
static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa,
const void *data, unsigned long len)
{
struct vgic_dist *dist = &kvm->arch.vgic;
int ret;
dist->table_write_in_progress = true;
ret = kvm_write_guest_lock(kvm, gpa, data, len);
dist->table_write_in_progress = false;
return ret;
}
/*
* This struct provides an intermediate representation of the fields contained
* in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC


@@ -263,7 +263,7 @@ struct vgic_dist {
struct vgic_io_device dist_iodev;
bool has_its;
bool save_its_tables_in_progress;
bool table_write_in_progress;
/*
* Contains the attributes and gpa of the LPI configuration table.


@@ -237,6 +237,11 @@ static void guest_check_s1ptw_wr_in_dirty_log(void)
GUEST_SYNC(CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG);
}
static void guest_check_no_s1ptw_wr_in_dirty_log(void)
{
GUEST_SYNC(CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG);
}
static void guest_exec(void)
{
int (*code)(void) = (int (*)(void))TEST_EXEC_GVA;
@@ -304,7 +309,7 @@ static struct uffd_args {
/* Returns true to continue the test, and false if it should be skipped. */
static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
struct uffd_args *args, bool expect_write)
struct uffd_args *args)
{
uint64_t addr = msg->arg.pagefault.address;
uint64_t flags = msg->arg.pagefault.flags;
@@ -313,7 +318,6 @@ static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING,
"The only expected UFFD mode is MISSING");
ASSERT_EQ(!!(flags & UFFD_PAGEFAULT_FLAG_WRITE), expect_write);
ASSERT_EQ(addr, (uint64_t)args->hva);
pr_debug("uffd fault: addr=%p write=%d\n",
@@ -337,19 +341,14 @@ static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
return 0;
}
static int uffd_pt_write_handler(int mode, int uffd, struct uffd_msg *msg)
static int uffd_pt_handler(int mode, int uffd, struct uffd_msg *msg)
{
return uffd_generic_handler(mode, uffd, msg, &pt_args, true);
return uffd_generic_handler(mode, uffd, msg, &pt_args);
}
static int uffd_data_write_handler(int mode, int uffd, struct uffd_msg *msg)
static int uffd_data_handler(int mode, int uffd, struct uffd_msg *msg)
{
return uffd_generic_handler(mode, uffd, msg, &data_args, true);
}
static int uffd_data_read_handler(int mode, int uffd, struct uffd_msg *msg)
{
return uffd_generic_handler(mode, uffd, msg, &data_args, false);
return uffd_generic_handler(mode, uffd, msg, &data_args);
}
static void setup_uffd_args(struct userspace_mem_region *region,
@@ -471,9 +470,12 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd)
{
struct userspace_mem_region *data_region, *pt_region;
bool continue_test = true;
uint64_t pte_gpa, pte_pg;
data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
pt_region = vm_get_mem_region(vm, MEM_REGION_PT);
pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA));
pte_pg = (pte_gpa - pt_region->region.guest_phys_addr) / getpagesize();
if (cmd == CMD_SKIP_TEST)
continue_test = false;
@@ -486,13 +488,13 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd)
TEST_ASSERT(check_write_in_dirty_log(vm, data_region, 0),
"Missing write in dirty log");
if (cmd & CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG)
TEST_ASSERT(check_write_in_dirty_log(vm, pt_region, 0),
TEST_ASSERT(check_write_in_dirty_log(vm, pt_region, pte_pg),
"Missing s1ptw write in dirty log");
if (cmd & CMD_CHECK_NO_WRITE_IN_DIRTY_LOG)
TEST_ASSERT(!check_write_in_dirty_log(vm, data_region, 0),
"Unexpected write in dirty log");
if (cmd & CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG)
TEST_ASSERT(!check_write_in_dirty_log(vm, pt_region, 0),
TEST_ASSERT(!check_write_in_dirty_log(vm, pt_region, pte_pg),
"Unexpected s1ptw write in dirty log");
return continue_test;
@@ -797,7 +799,7 @@ static void help(char *name)
.expected_events = { .uffd_faults = _uffd_faults, }, \
}
#define TEST_DIRTY_LOG(_access, _with_af, _test_check) \
#define TEST_DIRTY_LOG(_access, _with_af, _test_check, _pt_check) \
{ \
.name = SCAT3(dirty_log, _access, _with_af), \
.data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
@@ -805,13 +807,12 @@ static void help(char *name)
.guest_prepare = { _PREPARE(_with_af), \
_PREPARE(_access) }, \
.guest_test = _access, \
.guest_test_check = { _CHECK(_with_af), _test_check, \
guest_check_s1ptw_wr_in_dirty_log}, \
.guest_test_check = { _CHECK(_with_af), _test_check, _pt_check }, \
.expected_events = { 0 }, \
}
#define TEST_UFFD_AND_DIRTY_LOG(_access, _with_af, _uffd_data_handler, \
_uffd_faults, _test_check) \
_uffd_faults, _test_check, _pt_check) \
{ \
.name = SCAT3(uffd_and_dirty_log, _access, _with_af), \
.data_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
@@ -820,16 +821,17 @@ static void help(char *name)
_PREPARE(_access) }, \
.guest_test = _access, \
.mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
.guest_test_check = { _CHECK(_with_af), _test_check }, \
.guest_test_check = { _CHECK(_with_af), _test_check, _pt_check }, \
.uffd_data_handler = _uffd_data_handler, \
.uffd_pt_handler = uffd_pt_write_handler, \
.uffd_pt_handler = uffd_pt_handler, \
.expected_events = { .uffd_faults = _uffd_faults, }, \
}
#define TEST_RO_MEMSLOT(_access, _mmio_handler, _mmio_exits) \
{ \
.name = SCAT3(ro_memslot, _access, _with_af), \
.name = SCAT2(ro_memslot, _access), \
.data_memslot_flags = KVM_MEM_READONLY, \
.pt_memslot_flags = KVM_MEM_READONLY, \
.guest_prepare = { _PREPARE(_access) }, \
.guest_test = _access, \
.mmio_handler = _mmio_handler, \
@@ -840,6 +842,7 @@ static void help(char *name)
{ \
.name = SCAT2(ro_memslot_no_syndrome, _access), \
.data_memslot_flags = KVM_MEM_READONLY, \
.pt_memslot_flags = KVM_MEM_READONLY, \
.guest_test = _access, \
.fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
.expected_events = { .fail_vcpu_runs = 1 }, \
@@ -848,9 +851,9 @@ static void help(char *name)
#define TEST_RO_MEMSLOT_AND_DIRTY_LOG(_access, _mmio_handler, _mmio_exits, \
_test_check) \
{ \
.name = SCAT3(ro_memslot, _access, _with_af), \
.name = SCAT2(ro_memslot, _access), \
.data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
.pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
.pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
.guest_prepare = { _PREPARE(_access) }, \
.guest_test = _access, \
.guest_test_check = { _test_check }, \
@@ -862,7 +865,7 @@ static void help(char *name)
{ \
.name = SCAT2(ro_memslot_no_syn_and_dlog, _access), \
.data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
.pt_memslot_flags = KVM_MEM_LOG_DIRTY_PAGES, \
.pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \
.guest_test = _access, \
.guest_test_check = { _test_check }, \
.fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
@@ -874,11 +877,12 @@ static void help(char *name)
{ \
.name = SCAT2(ro_memslot_uffd, _access), \
.data_memslot_flags = KVM_MEM_READONLY, \
.pt_memslot_flags = KVM_MEM_READONLY, \
.mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
.guest_prepare = { _PREPARE(_access) }, \
.guest_test = _access, \
.uffd_data_handler = _uffd_data_handler, \
.uffd_pt_handler = uffd_pt_write_handler, \
.uffd_pt_handler = uffd_pt_handler, \
.mmio_handler = _mmio_handler, \
.expected_events = { .mmio_exits = _mmio_exits, \
.uffd_faults = _uffd_faults }, \
@@ -889,10 +893,11 @@ static void help(char *name)
{ \
.name = SCAT2(ro_memslot_no_syndrome, _access), \
.data_memslot_flags = KVM_MEM_READONLY, \
.pt_memslot_flags = KVM_MEM_READONLY, \
.mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \
.guest_test = _access, \
.uffd_data_handler = _uffd_data_handler, \
.uffd_pt_handler = uffd_pt_write_handler, \
.uffd_pt_handler = uffd_pt_handler, \
.fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \
.expected_events = { .fail_vcpu_runs = 1, \
.uffd_faults = _uffd_faults }, \
@@ -933,44 +938,51 @@ static struct test_desc tests[] = {
* (S1PTW).
*/
TEST_UFFD(guest_read64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_read_handler, uffd_pt_write_handler, 2),
/* no_af should also lead to a PT write. */
uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_read64, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_read_handler, uffd_pt_write_handler, 2),
/* Note how that cas invokes the read handler. */
uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_cas, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_read_handler, uffd_pt_write_handler, 2),
uffd_data_handler, uffd_pt_handler, 2),
/*
* Can't test guest_at with_af as it's IMPDEF whether the AF is set.
* The S1PTW fault should still be marked as a write.
*/
TEST_UFFD(guest_at, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_read_handler, uffd_pt_write_handler, 1),
uffd_no_handler, uffd_pt_handler, 1),
TEST_UFFD(guest_ld_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_read_handler, uffd_pt_write_handler, 2),
uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_write64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_write_handler, uffd_pt_write_handler, 2),
uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_dc_zva, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_write_handler, uffd_pt_write_handler, 2),
uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_st_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_write_handler, uffd_pt_write_handler, 2),
uffd_data_handler, uffd_pt_handler, 2),
TEST_UFFD(guest_exec, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
uffd_data_read_handler, uffd_pt_write_handler, 2),
uffd_data_handler, uffd_pt_handler, 2),
/*
* Try accesses when the data and PT memory regions are both
* tracked for dirty logging.
*/
TEST_DIRTY_LOG(guest_read64, with_af, guest_check_no_write_in_dirty_log),
/* no_af should also lead to a PT write. */
TEST_DIRTY_LOG(guest_read64, no_af, guest_check_no_write_in_dirty_log),
TEST_DIRTY_LOG(guest_ld_preidx, with_af, guest_check_no_write_in_dirty_log),
TEST_DIRTY_LOG(guest_at, no_af, guest_check_no_write_in_dirty_log),
TEST_DIRTY_LOG(guest_exec, with_af, guest_check_no_write_in_dirty_log),
TEST_DIRTY_LOG(guest_write64, with_af, guest_check_write_in_dirty_log),
TEST_DIRTY_LOG(guest_cas, with_af, guest_check_write_in_dirty_log),
TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log),
TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log),
TEST_DIRTY_LOG(guest_read64, with_af, guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_read64, no_af, guest_check_no_write_in_dirty_log,
guest_check_no_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_ld_preidx, with_af,
guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_at, no_af, guest_check_no_write_in_dirty_log,
guest_check_no_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_exec, with_af, guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_write64, with_af, guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_cas, with_af, guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
/*
* Access when the data and PT memory regions are both marked for
@@ -980,29 +992,43 @@ static struct test_desc tests[] = {
* fault, and nothing in the dirty log. Any S1PTW should result in
* a write in the dirty log and a userfaultfd write.
*/
TEST_UFFD_AND_DIRTY_LOG(guest_read64, with_af, uffd_data_read_handler, 2,
guest_check_no_write_in_dirty_log),
/* no_af should also lead to a PT write. */
TEST_UFFD_AND_DIRTY_LOG(guest_read64, no_af, uffd_data_read_handler, 2,
guest_check_no_write_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_ld_preidx, with_af, uffd_data_read_handler,
2, guest_check_no_write_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_at, with_af, 0, 1,
guest_check_no_write_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_exec, with_af, uffd_data_read_handler, 2,
guest_check_no_write_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af, uffd_data_write_handler,
2, guest_check_write_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_cas, with_af, uffd_data_read_handler, 2,
guest_check_write_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_dc_zva, with_af, uffd_data_write_handler,
2, guest_check_write_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_read64, with_af,
uffd_data_handler, 2,
guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_read64, no_af,
uffd_data_handler, 2,
guest_check_no_write_in_dirty_log,
guest_check_no_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_ld_preidx, with_af,
uffd_data_handler,
2, guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_at, with_af, uffd_no_handler, 1,
guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_exec, with_af,
uffd_data_handler, 2,
guest_check_no_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af,
uffd_data_handler,
2, guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_cas, with_af,
uffd_data_handler, 2,
guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_dc_zva, with_af,
uffd_data_handler,
2, guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
TEST_UFFD_AND_DIRTY_LOG(guest_st_preidx, with_af,
uffd_data_write_handler, 2,
guest_check_write_in_dirty_log),
uffd_data_handler, 2,
guest_check_write_in_dirty_log,
guest_check_s1ptw_wr_in_dirty_log),
/*
* Try accesses when the data memory region is marked read-only
* Access when both the PT and data regions are marked read-only
* (with KVM_MEM_READONLY). Writes with a syndrome result in an
* MMIO exit, writes with no syndrome (e.g., CAS) result in a
* failed vcpu run, and reads/execs with and without syndroms do
@@ -1018,7 +1044,7 @@ static struct test_desc tests[] = {
TEST_RO_MEMSLOT_NO_SYNDROME(guest_st_preidx),
/*
* Access when both the data region is both read-only and marked
* The PT and data regions are both read-only and marked
* for dirty logging at the same time. The expected result is that
* for writes there should be no write in the dirty log. The
* readonly handling is the same as if the memslot was not marked
@@ -1043,7 +1069,7 @@ static struct test_desc tests[] = {
guest_check_no_write_in_dirty_log),
/*
* Access when the data region is both read-only and punched with
* The PT and data regions are both read-only and punched with
* holes tracked with userfaultfd. The expected result is the
* union of both userfaultfd and read-only behaviors. For example,
* write accesses result in a userfaultfd write fault and an MMIO
@@ -1051,22 +1077,15 @@ static struct test_desc tests[] = {
* no userfaultfd write fault. Reads result in userfaultfd getting
* triggered.
*/
TEST_RO_MEMSLOT_AND_UFFD(guest_read64, 0, 0,
uffd_data_read_handler, 2),
TEST_RO_MEMSLOT_AND_UFFD(guest_ld_preidx, 0, 0,
uffd_data_read_handler, 2),
TEST_RO_MEMSLOT_AND_UFFD(guest_at, 0, 0,
uffd_no_handler, 1),
TEST_RO_MEMSLOT_AND_UFFD(guest_exec, 0, 0,
uffd_data_read_handler, 2),
TEST_RO_MEMSLOT_AND_UFFD(guest_read64, 0, 0, uffd_data_handler, 2),
TEST_RO_MEMSLOT_AND_UFFD(guest_ld_preidx, 0, 0, uffd_data_handler, 2),
TEST_RO_MEMSLOT_AND_UFFD(guest_at, 0, 0, uffd_no_handler, 1),
TEST_RO_MEMSLOT_AND_UFFD(guest_exec, 0, 0, uffd_data_handler, 2),
TEST_RO_MEMSLOT_AND_UFFD(guest_write64, mmio_on_test_gpa_handler, 1,
uffd_data_write_handler, 2),
TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_cas,
uffd_data_read_handler, 2),
TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_dc_zva,
uffd_no_handler, 1),
TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_st_preidx,
uffd_no_handler, 1),
uffd_data_handler, 2),
TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_cas, uffd_data_handler, 2),
TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_dc_zva, uffd_no_handler, 1),
TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_st_preidx, uffd_no_handler, 1),
{ 0 }
};