diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 8f07b13c9f15..edf024f1f141 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1889,6 +1889,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 	int sparse_banks_len;
 	u32 vector;
 	bool all_cpus;
+	int i;
 
 	if (hc->code == HVCALL_SEND_IPI) {
 		if (!hc->fast) {
@@ -1909,9 +1910,15 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 
 		trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
 	} else {
-		if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
-					    sizeof(send_ipi_ex))))
-			return HV_STATUS_INVALID_HYPERCALL_INPUT;
+		if (!hc->fast) {
+			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
+						    sizeof(send_ipi_ex))))
+				return HV_STATUS_INVALID_HYPERCALL_INPUT;
+		} else {
+			send_ipi_ex.vector = (u32)hc->ingpa;
+			send_ipi_ex.vp_set.format = hc->outgpa;
+			send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]);
+		}
 
 		trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
 					 send_ipi_ex.vp_set.format,
@@ -1919,8 +1926,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 
 		vector = send_ipi_ex.vector;
 		valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
-		sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
-			sizeof(sparse_banks[0]);
+		sparse_banks_len = bitmap_weight(&valid_bank_mask, 64);
 
 		all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
 
@@ -1930,12 +1936,27 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 		if (!sparse_banks_len)
 			goto ret_success;
 
-		if (kvm_read_guest(kvm,
-				   hc->ingpa + offsetof(struct hv_send_ipi_ex,
-							vp_set.bank_contents),
-				   sparse_banks,
-				   sparse_banks_len))
-			return HV_STATUS_INVALID_HYPERCALL_INPUT;
+		if (!hc->fast) {
+			if (kvm_read_guest(kvm,
+					   hc->ingpa + offsetof(struct hv_send_ipi_ex,
+								vp_set.bank_contents),
+					   sparse_banks,
+					   sparse_banks_len * sizeof(sparse_banks[0])))
+				return HV_STATUS_INVALID_HYPERCALL_INPUT;
+		} else {
+			/*
+			 * The lower half of XMM0 is already consumed, each XMM holds
+			 * two sparse banks.
+			 */
+			if (sparse_banks_len > (2 * HV_HYPERCALL_MAX_XMM_REGISTERS - 1))
+				return HV_STATUS_INVALID_HYPERCALL_INPUT;
+			for (i = 0; i < sparse_banks_len; i++) {
+				if (i % 2)
+					sparse_banks[i] = sse128_lo(hc->xmm[(i + 1) / 2]);
+				else
+					sparse_banks[i] = sse128_hi(hc->xmm[i / 2]);
+			}
+		}
 	}
 
 check_and_send_ipi:
@@ -2097,6 +2118,7 @@ static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
+	case HVCALL_SEND_IPI_EX:
 		return true;
 	}
 
@@ -2264,14 +2286,8 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 		ret = kvm_hv_flush_tlb(vcpu, &hc);
 		break;
 	case HVCALL_SEND_IPI:
-		if (unlikely(hc.rep)) {
-			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
-			break;
-		}
-		ret = kvm_hv_send_ipi(vcpu, &hc);
-		break;
 	case HVCALL_SEND_IPI_EX:
-		if (unlikely(hc.fast || hc.rep)) {
+		if (unlikely(hc.rep)) {
 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
 			break;
 		}
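
For reference (not part of the patch): a minimal user-space sketch of the XMM-half indexing used in the fast path above. It assumes HV_HYPERCALL_MAX_XMM_REGISTERS is 6, as in the Hyper-V TLFS header; bank 0 comes from the high half of XMM0 because the low half already carried valid_bank_mask, and each later register contributes its low half first, then its high half.

/* Standalone sketch, not kernel code: prints which XMM half feeds each bank. */
#include <stdio.h>

#define HV_HYPERCALL_MAX_XMM_REGISTERS 6	/* assumed value, per hyperv-tlfs.h */

int main(void)
{
	/* Low half of XMM0 holds valid_bank_mask, so only 11 halves remain. */
	int max_banks = 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - 1;
	int i;

	for (i = 0; i < max_banks; i++) {
		if (i % 2)
			printf("sparse_banks[%2d] <- lo(xmm[%d])\n", i, (i + 1) / 2);
		else
			printf("sparse_banks[%2d] <- hi(xmm[%d])\n", i, i / 2);
	}
	return 0;
}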