hyperv-next for 5.16

-----BEGIN PGP SIGNATURE-----
 
 iQFHBAABCAAxFiEEIbPD0id6easf0xsudhRwX5BBoF4FAmGBMQUTHHdlaS5saXVA
 a2VybmVsLm9yZwAKCRB2FHBfkEGgXmE5B/9MK3Ju+tc6C8eyR3Ic4XBYHJ3voEKO
 M+R90gggBriDOgkz4B8vF+k0aD8wevXAUtmCSXonDzCh5H7GoyfrVZmJEVkwlioH
 ZMSMlFHcjGhCPIXhLbNtfo/NsAYEtT/lRM2lLGCSbdGuKabylXKujVdhuSIcRPdj
 Rj5innUgcAywOoxG6WzFt3JBzM33UQErCGfUF2b7Rvp9E+Zii4vIMxkMzUpnkEHH
 F8WMEdL0DqH5ThOs0MslNgy03pUC9wk1d5DNd9ytYHqiSQtcQZhFHw/P6dxzUFlW
 OptWv31PXUIsiJf4Zi9hmfjgUl+KZHeacZ2hXtidAo86VPcIjVs25OQW
 =40fn
 -----END PGP SIGNATURE-----

Merge tag 'hyperv-next-signed-20211102' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux

Pull hyperv updates from Wei Liu:

 - Initial patch set for Hyper-V isolation VM support (Tianyu Lan)

 - Fix a warning on preemption (Vitaly Kuznetsov)

 - A bunch of misc cleanup patches

* tag 'hyperv-next-signed-20211102' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  x86/hyperv: Protect set_hv_tscchange_cb() against getting preempted
  Drivers: hv : vmbus: Adding NULL pointer check
  x86/hyperv: Remove duplicate include
  x86/hyperv: Remove duplicated include in hv_init
  Drivers: hv: vmbus: Remove unused code to check for subchannels
  Drivers: hv: vmbus: Initialize VMbus ring buffer for Isolation VM
  Drivers: hv: vmbus: Add SNP support for VMbus channel initiate message
  x86/hyperv: Add ghcb hvcall support for SNP VM
  x86/hyperv: Add Write/Read MSR registers via ghcb page
  Drivers: hv: vmbus: Mark vmbus ring buffer visible to host in Isolation VM
  x86/hyperv: Add new hvcall guest address host visibility support
  x86/hyperv: Initialize shared memory boundary in the Isolation VM.
  x86/hyperv: Initialize GHCB page in Isolation VM
commit 44261f8e28
Merged by: Linus Torvalds, 2021-11-02 10:56:49 -07:00
21 changed files with 761 additions and 172 deletions


@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := hv_init.o mmu.o nested.o irqdomain.o
obj-y := hv_init.o mmu.o nested.o irqdomain.o ivm.o
obj-$(CONFIG_X86_64) += hv_apic.o hv_proc.o
ifdef CONFIG_X86_64


@ -36,12 +36,42 @@ EXPORT_SYMBOL_GPL(hv_current_partition_id);
void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);
union hv_ghcb __percpu **hv_ghcb_pg;
/* Storage to save the hypercall page temporarily for hibernation */
static void *hv_hypercall_pg_saved;
struct hv_vp_assist_page **hv_vp_assist_page;
EXPORT_SYMBOL_GPL(hv_vp_assist_page);
static int hyperv_init_ghcb(void)
{
u64 ghcb_gpa;
void *ghcb_va;
void **ghcb_base;
if (!hv_isolation_type_snp())
return 0;
if (!hv_ghcb_pg)
return -EINVAL;
/*
 * The GHCB page is allocated by the paravisor. The address
 * returned by MSR_AMD64_SEV_ES_GHCB is above the shared
 * memory boundary; map it here.
*/
rdmsrl(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);
ghcb_va = memremap(ghcb_gpa, HV_HYP_PAGE_SIZE, MEMREMAP_WB);
if (!ghcb_va)
return -ENOMEM;
ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
*ghcb_base = ghcb_va;
return 0;
}
static int hv_cpu_init(unsigned int cpu)
{
union hv_vp_assist_msr_contents msr = { 0 };
@ -85,7 +115,7 @@ static int hv_cpu_init(unsigned int cpu)
}
}
return 0;
return hyperv_init_ghcb();
}
static void (*hv_reenlightenment_cb)(void);
@ -139,7 +169,6 @@ void set_hv_tscchange_cb(void (*cb)(void))
struct hv_reenlightenment_control re_ctrl = {
.vector = HYPERV_REENLIGHTENMENT_VECTOR,
.enabled = 1,
.target_vp = hv_vp_index[smp_processor_id()]
};
struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1};
@ -153,8 +182,12 @@ void set_hv_tscchange_cb(void (*cb)(void))
/* Make sure callback is registered before we write to MSRs */
wmb();
re_ctrl.target_vp = hv_vp_index[get_cpu()];
wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));
put_cpu();
}
EXPORT_SYMBOL_GPL(set_hv_tscchange_cb);
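The hunk above is the preemption fix called out in the pull message: reading hv_vp_index[smp_processor_id()] in preemptible context triggers a debug warning, so the per-CPU lookup and the MSR writes are now bracketed by get_cpu()/put_cpu(). A minimal sketch of that pattern (annotation only, not part of the diff; re_ctrl/emu_ctrl are the locals declared earlier in set_hv_tscchange_cb()):

	int cpu = get_cpu();	/* disables preemption, returns current CPU */

	re_ctrl.target_vp = hv_vp_index[cpu];
	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));

	put_cpu();		/* re-enables preemption */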
@ -177,6 +210,14 @@ static int hv_cpu_die(unsigned int cpu)
{
struct hv_reenlightenment_control re_ctrl;
unsigned int new_cpu;
void **ghcb_va;
if (hv_ghcb_pg) {
ghcb_va = (void **)this_cpu_ptr(hv_ghcb_pg);
if (*ghcb_va)
memunmap(*ghcb_va);
*ghcb_va = NULL;
}
hv_common_cpu_die(cpu);
@ -366,10 +407,16 @@ void __init hyperv_init(void)
goto common_free;
}
if (hv_isolation_type_snp()) {
hv_ghcb_pg = alloc_percpu(union hv_ghcb *);
if (!hv_ghcb_pg)
goto free_vp_assist_page;
}
cpuhp = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv_init:online",
hv_cpu_init, hv_cpu_die);
if (cpuhp < 0)
goto free_vp_assist_page;
goto free_ghcb_page;
/*
* Setup the hypercall page and enable hypercalls.
@ -379,14 +426,15 @@ void __init hyperv_init(void)
guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
/* Hyper-V requires the guest OS ID to be written via the GHCB page in an SNP Isolation VM. */
hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
__builtin_return_address(0));
if (hv_hypercall_pg == NULL) {
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
goto remove_cpuhp_state;
}
if (hv_hypercall_pg == NULL)
goto clean_guest_os_id;
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
hypercall_msr.enable = 1;
@ -456,8 +504,12 @@ void __init hyperv_init(void)
hv_query_ext_cap(0);
return;
remove_cpuhp_state:
clean_guest_os_id:
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
cpuhp_remove_state(cpuhp);
free_ghcb_page:
free_percpu(hv_ghcb_pg);
free_vp_assist_page:
kfree(hv_vp_assist_page);
hv_vp_assist_page = NULL;
@ -476,6 +528,7 @@ void hyperv_cleanup(void)
/* Reset our OS id */
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
/*
* Reset hypercall page reference before reset the page,
@ -546,16 +599,3 @@ bool hv_is_hyperv_initialized(void)
return hypercall_msr.enable;
}
EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);
enum hv_isolation_type hv_get_isolation_type(void)
{
if (!(ms_hyperv.priv_high & HV_ISOLATION))
return HV_ISOLATION_TYPE_NONE;
return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);
bool hv_is_isolation_supported(void)
{
return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}

arch/x86/hyperv/ivm.c (new file, 289 lines)

@ -0,0 +1,289 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Hyper-V Isolation VM interface with paravisor and hypervisor
*
* Author:
* Tianyu Lan <Tianyu.Lan@microsoft.com>
*/
#include <linux/bitfield.h>
#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define GHCB_USAGE_HYPERV_CALL 1
union hv_ghcb {
struct ghcb ghcb;
struct {
u64 hypercalldata[509];
u64 outputgpa;
union {
union {
struct {
u32 callcode : 16;
u32 isfast : 1;
u32 reserved1 : 14;
u32 isnested : 1;
u32 countofelements : 12;
u32 reserved2 : 4;
u32 repstartindex : 12;
u32 reserved3 : 4;
};
u64 asuint64;
} hypercallinput;
union {
struct {
u16 callstatus;
u16 reserved1;
u32 elementsprocessed : 12;
u32 reserved2 : 20;
};
u64 asunit64;
} hypercalloutput;
};
u64 reserved2;
} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
union hv_ghcb *hv_ghcb;
void **ghcb_base;
unsigned long flags;
u64 status;
if (!hv_ghcb_pg)
return -EFAULT;
WARN_ON(in_nmi());
local_irq_save(flags);
ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
hv_ghcb = (union hv_ghcb *)*ghcb_base;
if (!hv_ghcb) {
local_irq_restore(flags);
return -EFAULT;
}
hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;
hv_ghcb->hypercall.outputgpa = (u64)output;
hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
hv_ghcb->hypercall.hypercallinput.callcode = control;
if (input_size)
memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);
VMGEXIT();
hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
sizeof(hv_ghcb->ghcb.save.valid_bitmap));
status = hv_ghcb->hypercall.hypercalloutput.callstatus;
local_irq_restore(flags);
return status;
}
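hv_ghcb_hypercall() above is the GHCB-based path for issuing a hypercall when the guest cannot use the hypercall page directly. An illustrative call site follows; it is a sketch that mirrors the hv_post_message() hunk later in this series, not additional code from the commit:

	u64 status;

	/* Route the POST_MESSAGE hypercall through the GHCB page on SNP. */
	status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL,
				   sizeof(*aligned_msg));
	if (!hv_result_success(status))
		pr_err("POST_MESSAGE via GHCB failed: 0x%llx\n", status);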
void hv_ghcb_msr_write(u64 msr, u64 value)
{
union hv_ghcb *hv_ghcb;
void **ghcb_base;
unsigned long flags;
struct es_em_ctxt ctxt;
if (!hv_ghcb_pg)
return;
WARN_ON(in_nmi());
local_irq_save(flags);
ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
hv_ghcb = (union hv_ghcb *)*ghcb_base;
if (!hv_ghcb) {
local_irq_restore(flags);
return;
}
ghcb_set_rcx(&hv_ghcb->ghcb, msr);
ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));
if (sev_es_ghcb_hv_call(&hv_ghcb->ghcb, false, &ctxt,
SVM_EXIT_MSR, 1, 0))
pr_warn("Fail to write msr via ghcb %llx.\n", msr);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_write);
void hv_ghcb_msr_read(u64 msr, u64 *value)
{
union hv_ghcb *hv_ghcb;
void **ghcb_base;
unsigned long flags;
struct es_em_ctxt ctxt;
/* Check size of union hv_ghcb here. */
BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);
if (!hv_ghcb_pg)
return;
WARN_ON(in_nmi());
local_irq_save(flags);
ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
hv_ghcb = (union hv_ghcb *)*ghcb_base;
if (!hv_ghcb) {
local_irq_restore(flags);
return;
}
ghcb_set_rcx(&hv_ghcb->ghcb, msr);
if (sev_es_ghcb_hv_call(&hv_ghcb->ghcb, false, &ctxt,
SVM_EXIT_MSR, 0, 0))
pr_warn("Fail to read msr via ghcb %llx.\n", msr);
else
*value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
#endif
enum hv_isolation_type hv_get_isolation_type(void)
{
if (!(ms_hyperv.priv_high & HV_ISOLATION))
return HV_ISOLATION_TYPE_NONE;
return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);
/*
 * hv_is_isolation_supported - Check whether the system runs in a
 * Hyper-V Isolation VM.
*/
bool hv_is_isolation_supported(void)
{
if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
return false;
if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
return false;
return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}
DEFINE_STATIC_KEY_FALSE(isolation_type_snp);
/*
 * hv_isolation_type_snp - Check whether the system runs in an AMD
 * SEV-SNP based Isolation VM.
*/
bool hv_isolation_type_snp(void)
{
return static_branch_unlikely(&isolation_type_snp);
}
/*
* hv_mark_gpa_visibility - Set pages visible to host via hvcall.
*
 * In an Isolation VM, all guest memory is encrypted from the host; the
 * guest must make memory visible to the host via hvcall before sharing
 * it with the host.
*/
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
enum hv_mem_host_visibility visibility)
{
struct hv_gpa_range_for_visibility **input_pcpu, *input;
u16 pages_processed;
u64 hv_status;
unsigned long flags;
/* no-op if partition isolation is not enabled */
if (!hv_is_isolation_supported())
return 0;
if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
HV_MAX_MODIFY_GPA_REP_COUNT);
return -EINVAL;
}
local_irq_save(flags);
input_pcpu = (struct hv_gpa_range_for_visibility **)
this_cpu_ptr(hyperv_pcpu_input_arg);
input = *input_pcpu;
if (unlikely(!input)) {
local_irq_restore(flags);
return -EINVAL;
}
input->partition_id = HV_PARTITION_ID_SELF;
input->host_visibility = visibility;
input->reserved0 = 0;
input->reserved1 = 0;
memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
hv_status = hv_do_rep_hypercall(
HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
0, input, &pages_processed);
local_irq_restore(flags);
if (hv_result_success(hv_status))
return 0;
else
return -EFAULT;
}
/*
* hv_set_mem_host_visibility - Set specified memory visible to host.
*
 * In an Isolation VM, all guest memory is encrypted from the host; the
 * guest must make memory visible to the host via hvcall before sharing
 * it with the host. This function is a wrapper around
 * hv_mark_gpa_visibility(), taking a memory base and size.
*/
int hv_set_mem_host_visibility(unsigned long kbuffer, int pagecount, bool visible)
{
enum hv_mem_host_visibility visibility = visible ?
VMBUS_PAGE_VISIBLE_READ_WRITE : VMBUS_PAGE_NOT_VISIBLE;
u64 *pfn_array;
int ret = 0;
int i, pfn;
if (!hv_is_isolation_supported() || !hv_hypercall_pg)
return 0;
pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!pfn_array)
return -ENOMEM;
for (i = 0, pfn = 0; i < pagecount; i++) {
pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
pfn++;
if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
ret = hv_mark_gpa_visibility(pfn, pfn_array,
visibility);
if (ret)
goto err_free_pfn_array;
pfn = 0;
}
}
err_free_pfn_array:
kfree(pfn_array);
return ret;
}
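For scale, the loop above batches PFNs so that each hypercall input fits in a single page. A worked example, assuming 4 KiB pages (annotation only, not part of the diff):

	/*
	 * HV_MAX_MODIFY_GPA_REP_COUNT = (4096 / 8) - 2 = 510, so a 512-page
	 * (2 MiB) buffer is made host-visible with two
	 * HvCallModifySparseGpaPageHostVisibility calls: one batch of 510
	 * PFNs and one batch of 2.
	 */
	unsigned int batches = DIV_ROUND_UP(512, HV_MAX_MODIFY_GPA_REP_COUNT); /* == 2 */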


@ -276,6 +276,23 @@ enum hv_isolation_type {
#define HV_X64_MSR_TIME_REF_COUNT HV_REGISTER_TIME_REF_COUNT
#define HV_X64_MSR_REFERENCE_TSC HV_REGISTER_REFERENCE_TSC
/* Hyper-V memory host visibility */
enum hv_mem_host_visibility {
VMBUS_PAGE_NOT_VISIBLE = 0,
VMBUS_PAGE_VISIBLE_READ_ONLY = 1,
VMBUS_PAGE_VISIBLE_READ_WRITE = 3
};
/* HvCallModifySparseGpaPageHostVisibility hypercall */
#define HV_MAX_MODIFY_GPA_REP_COUNT ((PAGE_SIZE / sizeof(u64)) - 2)
struct hv_gpa_range_for_visibility {
u64 partition_id;
u32 host_visibility:2;
u32 reserved0:30;
u32 reserved1;
u64 gpa_page_list[HV_MAX_MODIFY_GPA_REP_COUNT];
} __packed;
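The REP count is sized so that the whole structure fills exactly one hypercall input page: 8 bytes of partition_id, 8 bytes of visibility/reserved bits, then the PFN list. A hypothetical compile-time check of that layout (not part of the commit) could look like this:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	/* partition_id (8) + visibility/reserved (8) precede the PFN list */
	static_assert(offsetof(struct hv_gpa_range_for_visibility, gpa_page_list) ==
		      2 * sizeof(u64));
	/* the 2 + HV_MAX_MODIFY_GPA_REP_COUNT u64s must fit in one page */
	static_assert(sizeof(struct hv_gpa_range_for_visibility) <= HV_HYP_PAGE_SIZE);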
/*
* Declare the MSR used to setup pages used to communicate with the hypervisor.
*/


@ -11,23 +11,14 @@
#include <asm/paravirt.h>
#include <asm/mshyperv.h>
union hv_ghcb;
DECLARE_STATIC_KEY_FALSE(isolation_type_snp);
typedef int (*hyperv_fill_flush_list_func)(
struct hv_guest_mapping_flush_list *flush,
void *data);
static inline void hv_set_register(unsigned int reg, u64 value)
{
wrmsrl(reg, value);
}
static inline u64 hv_get_register(unsigned int reg)
{
u64 value;
rdmsrl(reg, value);
return value;
}
#define hv_get_raw_timer() rdtsc_ordered()
void hyperv_vector_handler(struct pt_regs *regs);
@ -39,6 +30,8 @@ extern void *hv_hypercall_pg;
extern u64 hv_current_partition_id;
extern union hv_ghcb __percpu **hv_ghcb_pg;
int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
@ -188,6 +181,50 @@ struct irq_domain *hv_create_pci_msi_domain(void);
int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
struct hv_interrupt_entry *entry);
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
int hv_set_mem_host_visibility(unsigned long addr, int numpages, bool visible);
#ifdef CONFIG_AMD_MEM_ENCRYPT
void hv_ghcb_msr_write(u64 msr, u64 value);
void hv_ghcb_msr_read(u64 msr, u64 *value);
#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
#endif
extern bool hv_isolation_type_snp(void);
static inline bool hv_is_synic_reg(unsigned int reg)
{
if ((reg >= HV_REGISTER_SCONTROL) &&
(reg <= HV_REGISTER_SINT15))
return true;
return false;
}
static inline u64 hv_get_register(unsigned int reg)
{
u64 value;
if (hv_is_synic_reg(reg) && hv_isolation_type_snp())
hv_ghcb_msr_read(reg, &value);
else
rdmsrl(reg, value);
return value;
}
static inline void hv_set_register(unsigned int reg, u64 value)
{
if (hv_is_synic_reg(reg) && hv_isolation_type_snp()) {
hv_ghcb_msr_write(reg, value);
/* Write the proxy bit via the wrmsrl instruction */
if (reg >= HV_REGISTER_SINT0 &&
reg <= HV_REGISTER_SINT15)
wrmsrl(reg, value | 1 << 20);
} else {
wrmsrl(reg, value);
}
}
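With these wrappers, SynIC register users do not change: on an SNP Isolation VM the access is transparently routed through the GHCB page instead of rdmsrl()/wrmsrl(). A small usage sketch, following the shape of the hv_synic_enable_regs() hunk further down (illustrative only):

	union hv_synic_simp simp;

	simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);	/* GHCB read on SNP */
	simp.simp_enabled = 1;
	hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);	/* GHCB write on SNP */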
#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
@ -205,6 +242,13 @@ static inline int hyperv_flush_guest_mapping_range(u64 as,
{
return -1;
}
static inline void hv_set_register(unsigned int reg, u64 value) { }
static inline u64 hv_get_register(unsigned int reg) { return 0; }
static inline int hv_set_mem_host_visibility(unsigned long addr, int numpages,
bool visible)
{
return -1;
}
#endif /* CONFIG_HYPERV */


@ -313,9 +313,14 @@ static void __init ms_hyperv_init_platform(void)
if (ms_hyperv.priv_high & HV_ISOLATION) {
ms_hyperv.isolation_config_a = cpuid_eax(HYPERV_CPUID_ISOLATION_CONFIG);
ms_hyperv.isolation_config_b = cpuid_ebx(HYPERV_CPUID_ISOLATION_CONFIG);
ms_hyperv.shared_gpa_boundary =
BIT_ULL(ms_hyperv.shared_gpa_boundary_bits);
pr_info("Hyper-V: Isolation Config: Group A 0x%x, Group B 0x%x\n",
ms_hyperv.isolation_config_a, ms_hyperv.isolation_config_b);
if (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP)
static_branch_enable(&isolation_type_snp);
}
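shared_gpa_boundary is derived from the CPUID-reported bit position. A worked example with a hypothetical value (the real bit position comes from HYPERV_CPUID_ISOLATION_CONFIG EBX; annotation only):

	/*
	 * Hypothetical example: if shared_gpa_boundary_bits reads as 46, then
	 * shared_gpa_boundary = BIT_ULL(46) = 0x400000000000 (64 TiB), and
	 * guest physical addresses at or above that boundary address the
	 * unencrypted, host-shared alias of memory.
	 */
	u64 boundary = BIT_ULL(ms_hyperv.shared_gpa_boundary_bits);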
if (hv_max_functions_eax >= HYPERV_CPUID_NESTED_FEATURES) {


@ -30,6 +30,8 @@
#include <asm/proto.h>
#include <asm/memtype.h>
#include <asm/set_memory.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include "../mm_internal.h"
@ -1981,15 +1983,15 @@ int set_memory_global(unsigned long addr, int numpages)
__pgprot(_PAGE_GLOBAL), 0);
}
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
/*
* __set_memory_enc_pgtable() is used for the hypervisors that get
* informed about "encryption" status via page tables.
*/
static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
{
struct cpa_data cpa;
int ret;
/* Nothing to do if memory encryption is not active */
if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return 0;
/* Should not be working on unaligned addresses */
if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
addr &= PAGE_MASK;
@ -2024,6 +2026,17 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
return ret;
}
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
if (hv_is_isolation_supported())
return hv_set_mem_host_visibility(addr, numpages, !enc);
if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return __set_memory_enc_pgtable(addr, numpages, enc);
return 0;
}
int set_memory_encrypted(unsigned long addr, int numpages)
{
return __set_memory_enc_dec(addr, numpages, true);
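With this split, an Isolation VM (which has no page-table encryption bit to flip) still funnels through the same driver-facing API. A hedged sketch of a caller, mirroring the VMBus GPADL hunk below (kbuffer/size stand for the caller's buffer):

	/*
	 * In an Isolation VM this routes to hv_set_mem_host_visibility()
	 * rather than the page-table based __set_memory_enc_pgtable() path.
	 */
	ret = set_memory_decrypted((unsigned long)kbuffer, PFN_UP(size));
	if (ret)
		return ret;	/* buffer stays encrypted/private on failure */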


@ -8,6 +8,7 @@ config HYPERV
|| (ARM64 && !CPU_BIG_ENDIAN))
select PARAVIRT
select X86_HV_CALLBACK_VECTOR if X86
select VMAP_PFN
help
Select this option to run Linux as a Hyper-V client operating
system.


@ -17,6 +17,7 @@
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>
#include <linux/set_memory.h>
#include <asm/page.h>
#include <asm/mshyperv.h>
@ -456,7 +457,7 @@ nomem:
static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
enum hv_gpadl_type type, void *kbuffer,
u32 size, u32 send_offset,
u32 *gpadl_handle)
struct vmbus_gpadl *gpadl)
{
struct vmbus_channel_gpadl_header *gpadlmsg;
struct vmbus_channel_gpadl_body *gpadl_body;
@ -474,6 +475,15 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
if (ret)
return ret;
ret = set_memory_decrypted((unsigned long)kbuffer,
PFN_UP(size));
if (ret) {
dev_warn(&channel->device_obj->device,
"Failed to set host visibility for new GPADL %d.\n",
ret);
return ret;
}
init_completion(&msginfo->waitevent);
msginfo->waiting_channel = channel;
@ -537,7 +547,10 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
}
/* At this point, we received the gpadl created msg */
*gpadl_handle = gpadlmsg->gpadl;
gpadl->gpadl_handle = gpadlmsg->gpadl;
gpadl->buffer = kbuffer;
gpadl->size = size;
cleanup:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
@ -549,6 +562,11 @@ cleanup:
}
kfree(msginfo);
if (ret)
set_memory_encrypted((unsigned long)kbuffer,
PFN_UP(size));
return ret;
}
@ -561,10 +579,10 @@ cleanup:
* @gpadl_handle: some funky thing
*/
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
u32 size, u32 *gpadl_handle)
u32 size, struct vmbus_gpadl *gpadl)
{
return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
0U, gpadl_handle);
0U, gpadl);
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
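Because the handle is now a structure carrying the buffer pointer and size, establish and teardown stay symmetric for callers. An illustrative driver-side usage (the netvsc and uio_hv_generic hunks below follow this shape; kbuffer/size are the caller's buffer):

	struct vmbus_gpadl gpadl = {};
	int ret;

	ret = vmbus_establish_gpadl(channel, kbuffer, size, &gpadl);
	if (ret)
		return ret;

	/* ... use gpadl.gpadl_handle in the open/setup message ... */

	if (gpadl.gpadl_handle)
		vmbus_teardown_gpadl(channel, &gpadl);	/* also re-encrypts kbuffer */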
@ -665,17 +683,8 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
if (!newchannel->max_pkt_size)
newchannel->max_pkt_size = VMBUS_DEFAULT_MAX_PKT_SIZE;
err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages, 0);
if (err)
goto error_clean_ring;
err = hv_ringbuffer_init(&newchannel->inbound, &page[send_pages],
recv_pages, newchannel->max_pkt_size);
if (err)
goto error_clean_ring;
/* Establish the gpadl for the ring buffer */
newchannel->ringbuffer_gpadlhandle = 0;
newchannel->ringbuffer_gpadlhandle.gpadl_handle = 0;
err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
page_address(newchannel->ringbuffer_page),
@ -685,6 +694,16 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
if (err)
goto error_clean_ring;
err = hv_ringbuffer_init(&newchannel->outbound,
page, send_pages, 0);
if (err)
goto error_free_gpadl;
err = hv_ringbuffer_init(&newchannel->inbound, &page[send_pages],
recv_pages, newchannel->max_pkt_size);
if (err)
goto error_free_gpadl;
/* Create and init the channel open message */
open_info = kzalloc(sizeof(*open_info) +
sizeof(struct vmbus_channel_open_channel),
@ -701,7 +720,8 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
open_msg->openid = newchannel->offermsg.child_relid;
open_msg->child_relid = newchannel->offermsg.child_relid;
open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
open_msg->ringbuffer_gpadlhandle
= newchannel->ringbuffer_gpadlhandle.gpadl_handle;
/*
* The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
* the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
@ -759,8 +779,7 @@ error_clean_msglist:
error_free_info:
kfree(open_info);
error_free_gpadl:
vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
newchannel->ringbuffer_gpadlhandle = 0;
vmbus_teardown_gpadl(newchannel, &newchannel->ringbuffer_gpadlhandle);
error_clean_ring:
hv_ringbuffer_cleanup(&newchannel->outbound);
hv_ringbuffer_cleanup(&newchannel->inbound);
@ -806,7 +825,7 @@ EXPORT_SYMBOL_GPL(vmbus_open);
/*
* vmbus_teardown_gpadl -Teardown the specified GPADL handle
*/
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
int vmbus_teardown_gpadl(struct vmbus_channel *channel, struct vmbus_gpadl *gpadl)
{
struct vmbus_channel_gpadl_teardown *msg;
struct vmbus_channel_msginfo *info;
@ -825,7 +844,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
msg->child_relid = channel->offermsg.child_relid;
msg->gpadl = gpadl_handle;
msg->gpadl = gpadl->gpadl_handle;
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_add_tail(&info->msglistentry,
@ -845,6 +864,8 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
wait_for_completion(&info->waitevent);
gpadl->gpadl_handle = 0;
post_msg_err:
/*
* If the channel has been rescinded;
@ -859,6 +880,12 @@ post_msg_err:
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
kfree(info);
ret = set_memory_encrypted((unsigned long)gpadl->buffer,
PFN_UP(gpadl->size));
if (ret)
pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret);
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
@ -933,9 +960,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
}
/* Tear down the gpadl for the channel's ring buffer */
else if (channel->ringbuffer_gpadlhandle) {
ret = vmbus_teardown_gpadl(channel,
channel->ringbuffer_gpadlhandle);
else if (channel->ringbuffer_gpadlhandle.gpadl_handle) {
ret = vmbus_teardown_gpadl(channel, &channel->ringbuffer_gpadlhandle);
if (ret) {
pr_err("Close failed: teardown gpadl return %d\n", ret);
/*
@ -943,8 +969,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* it is perhaps better to leak memory.
*/
}
channel->ringbuffer_gpadlhandle = 0;
}
if (!ret)


@ -1581,21 +1581,6 @@ cleanup:
return ret;
}
static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
struct list_head *cur, *tmp;
struct vmbus_channel *cur_channel;
if (primary_channel->sc_creation_callback == NULL)
return;
list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
primary_channel->sc_creation_callback(cur_channel);
}
}
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
@ -1603,25 +1588,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
bool ret;
ret = !list_empty(&primary->sc_list);
if (ret) {
/*
* Invoke the callback on sub-channel creation.
* This will present a uniform interface to the
* clients.
*/
invoke_sc_cb(primary);
}
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
void (*chn_rescind_cb)(struct vmbus_channel *))
{


@ -19,6 +19,8 @@
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/set_memory.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
@ -102,8 +104,9 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID;
}
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
msg->monitor_page1 = vmbus_connection.monitor_pages_pa[0];
msg->monitor_page2 = vmbus_connection.monitor_pages_pa[1];
msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
/*
@ -216,6 +219,65 @@ int vmbus_connect(void)
goto cleanup;
}
vmbus_connection.monitor_pages_original[0]
= vmbus_connection.monitor_pages[0];
vmbus_connection.monitor_pages_original[1]
= vmbus_connection.monitor_pages[1];
vmbus_connection.monitor_pages_pa[0]
= virt_to_phys(vmbus_connection.monitor_pages[0]);
vmbus_connection.monitor_pages_pa[1]
= virt_to_phys(vmbus_connection.monitor_pages[1]);
if (hv_is_isolation_supported()) {
ret = set_memory_decrypted((unsigned long)
vmbus_connection.monitor_pages[0],
1);
ret |= set_memory_decrypted((unsigned long)
vmbus_connection.monitor_pages[1],
1);
if (ret)
goto cleanup;
/*
* An Isolation VM with AMD SNP needs to access the monitor pages via
* the address space above the shared GPA boundary.
*/
if (hv_isolation_type_snp()) {
vmbus_connection.monitor_pages_pa[0] +=
ms_hyperv.shared_gpa_boundary;
vmbus_connection.monitor_pages_pa[1] +=
ms_hyperv.shared_gpa_boundary;
vmbus_connection.monitor_pages[0]
= memremap(vmbus_connection.monitor_pages_pa[0],
HV_HYP_PAGE_SIZE,
MEMREMAP_WB);
if (!vmbus_connection.monitor_pages[0]) {
ret = -ENOMEM;
goto cleanup;
}
vmbus_connection.monitor_pages[1]
= memremap(vmbus_connection.monitor_pages_pa[1],
HV_HYP_PAGE_SIZE,
MEMREMAP_WB);
if (!vmbus_connection.monitor_pages[1]) {
ret = -ENOMEM;
goto cleanup;
}
}
/*
* The set-memory-host-visibility hvcall smears the memory contents,
* so zero the monitor pages here.
*/
memset(vmbus_connection.monitor_pages[0], 0x00,
HV_HYP_PAGE_SIZE);
memset(vmbus_connection.monitor_pages[1], 0x00,
HV_HYP_PAGE_SIZE);
}
msginfo = kzalloc(sizeof(*msginfo) +
sizeof(struct vmbus_channel_initiate_contact),
GFP_KERNEL);
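The remapping above is just an offset into the shared alias. A worked example with hypothetical addresses (the boundary value is assumed, see the mshyperv.h hunk; annotation only):

	/*
	 * Hypothetical numbers: a monitor page at guest PA 0x123456000 with a
	 * shared GPA boundary of BIT_ULL(46) = 0x400000000000 is accessed
	 * through the alias 0x400123456000, which is what gets memremap()ed
	 * here and what the host is told in the initiate-contact message.
	 */
	phys_addr_t shared_alias = 0x123456000ULL + ms_hyperv.shared_gpa_boundary;
	void *va = memremap(shared_alias, HV_HYP_PAGE_SIZE, MEMREMAP_WB);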
@ -303,10 +365,31 @@ void vmbus_disconnect(void)
vmbus_connection.int_page = NULL;
}
hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
vmbus_connection.monitor_pages[0] = NULL;
vmbus_connection.monitor_pages[1] = NULL;
if (hv_is_isolation_supported()) {
/*
* memunmap() internally checks whether the input address is an
* ioremap address. It unmaps nothing in a non-SNP CVM, so there is
* no need to check the CVM type here.
*/
memunmap(vmbus_connection.monitor_pages[0]);
memunmap(vmbus_connection.monitor_pages[1]);
set_memory_encrypted((unsigned long)
vmbus_connection.monitor_pages_original[0],
1);
set_memory_encrypted((unsigned long)
vmbus_connection.monitor_pages_original[1],
1);
}
hv_free_hyperv_page((unsigned long)
vmbus_connection.monitor_pages_original[0]);
hv_free_hyperv_page((unsigned long)
vmbus_connection.monitor_pages_original[1]);
vmbus_connection.monitor_pages_original[0] =
vmbus_connection.monitor_pages[0] = NULL;
vmbus_connection.monitor_pages_original[1] =
vmbus_connection.monitor_pages[1] = NULL;
}
/*
@ -447,6 +530,10 @@ void vmbus_set_event(struct vmbus_channel *channel)
++channel->sig_events;
hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
if (hv_isolation_type_snp())
hv_ghcb_hypercall(HVCALL_SIGNAL_EVENT, &channel->sig_event,
NULL, sizeof(channel->sig_event));
else
hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
}
EXPORT_SYMBOL_GPL(vmbus_set_event);


@ -8,6 +8,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
@ -97,7 +98,13 @@ int hv_post_message(union hv_connection_id connection_id,
aligned_msg->payload_size = payload_size;
memcpy((void *)aligned_msg->payload, payload, payload_size);
status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
if (hv_isolation_type_snp())
status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
(void *)aligned_msg, NULL,
sizeof(*aligned_msg));
else
status = hv_do_hypercall(HVCALL_POST_MESSAGE,
aligned_msg, NULL);
/* Preemption must remain disabled until after the hypercall
* so some other thread can't get scheduled onto this cpu and
@ -136,17 +143,24 @@ int hv_synic_alloc(void)
tasklet_init(&hv_cpu->msg_dpc,
vmbus_on_msg_dpc, (unsigned long) hv_cpu);
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_message_page == NULL) {
pr_err("Unable to allocate SYNIC message page\n");
goto err;
}
/*
* SynIC message and event pages are allocated by the paravisor.
* Skip allocating these pages here.
*/
if (!hv_isolation_type_snp()) {
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_message_page == NULL) {
pr_err("Unable to allocate SYNIC message page\n");
goto err;
}
hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_event_page == NULL) {
pr_err("Unable to allocate SYNIC event page\n");
goto err;
hv_cpu->synic_event_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_event_page == NULL) {
pr_err("Unable to allocate SYNIC event page\n");
goto err;
}
}
hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
@ -201,16 +215,35 @@ void hv_synic_enable_regs(unsigned int cpu)
/* Setup the Synic's message page */
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.simp_enabled = 1;
simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
>> HV_HYP_PAGE_SHIFT;
if (hv_isolation_type_snp()) {
hv_cpu->synic_message_page
= memremap(simp.base_simp_gpa << HV_HYP_PAGE_SHIFT,
HV_HYP_PAGE_SIZE, MEMREMAP_WB);
if (!hv_cpu->synic_message_page)
pr_err("Fail to map syinc message page.\n");
} else {
simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
>> HV_HYP_PAGE_SHIFT;
}
hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
/* Setup the Synic's event page */
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 1;
siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
>> HV_HYP_PAGE_SHIFT;
if (hv_isolation_type_snp()) {
hv_cpu->synic_event_page =
memremap(siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT,
HV_HYP_PAGE_SIZE, MEMREMAP_WB);
if (!hv_cpu->synic_event_page)
pr_err("Fail to map syinc event page.\n");
} else {
siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
>> HV_HYP_PAGE_SHIFT;
}
hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
@ -257,6 +290,8 @@ int hv_synic_init(unsigned int cpu)
*/
void hv_synic_disable_regs(unsigned int cpu)
{
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_sint shared_sint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
@ -273,14 +308,27 @@ void hv_synic_disable_regs(unsigned int cpu)
shared_sint.as_uint64);
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
/*
* In an Isolation VM, the SIMP and SIEFP pages are allocated by the
* paravisor. These pages will also be used by the kdump kernel, so
* just reset the enable bit here and keep the page addresses.
*/
simp.simp_enabled = 0;
simp.base_simp_gpa = 0;
if (hv_isolation_type_snp())
memunmap(hv_cpu->synic_message_page);
else
simp.base_simp_gpa = 0;
hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 0;
siefp.base_siefp_gpa = 0;
if (hv_isolation_type_snp())
memunmap(hv_cpu->synic_event_page);
else
siefp.base_siefp_gpa = 0;
hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);


@ -249,6 +249,12 @@ bool __weak hv_is_isolation_supported(void)
}
EXPORT_SYMBOL_GPL(hv_is_isolation_supported);
bool __weak hv_isolation_type_snp(void)
{
return false;
}
EXPORT_SYMBOL_GPL(hv_isolation_type_snp);
void __weak hv_setup_vmbus_handler(void (*handler)(void))
{
}
@ -283,3 +289,9 @@ void __weak hyperv_cleanup(void)
{
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);
u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);


@ -241,6 +241,8 @@ struct vmbus_connection {
* is child->parent notification
*/
struct hv_monitor_page *monitor_pages[2];
void *monitor_pages_original[2];
phys_addr_t monitor_pages_pa[2];
struct list_head chn_msg_list;
spinlock_t channelmsg_lock;


@ -17,6 +17,8 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/io.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
@ -183,8 +185,10 @@ void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
struct page *pages, u32 page_cnt, u32 max_pkt_size)
{
int i;
struct page **pages_wraparound;
unsigned long *pfns_wraparound;
u64 pfn;
int i;
BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
@ -192,23 +196,50 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
* First page holds struct hv_ring_buffer, do wraparound mapping for
* the rest.
*/
pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
GFP_KERNEL);
if (!pages_wraparound)
return -ENOMEM;
if (hv_isolation_type_snp()) {
pfn = page_to_pfn(pages) +
PFN_DOWN(ms_hyperv.shared_gpa_boundary);
pages_wraparound[0] = pages;
for (i = 0; i < 2 * (page_cnt - 1); i++)
pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
pfns_wraparound = kcalloc(page_cnt * 2 - 1,
sizeof(unsigned long), GFP_KERNEL);
if (!pfns_wraparound)
return -ENOMEM;
ring_info->ring_buffer = (struct hv_ring_buffer *)
vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);
pfns_wraparound[0] = pfn;
for (i = 0; i < 2 * (page_cnt - 1); i++)
pfns_wraparound[i + 1] = pfn + i % (page_cnt - 1) + 1;
kfree(pages_wraparound);
ring_info->ring_buffer = (struct hv_ring_buffer *)
vmap_pfn(pfns_wraparound, page_cnt * 2 - 1,
PAGE_KERNEL);
kfree(pfns_wraparound);
if (!ring_info->ring_buffer)
return -ENOMEM;
/* Zero ring buffer after setting memory host visibility. */
memset(ring_info->ring_buffer, 0x00, PAGE_SIZE * page_cnt);
} else {
pages_wraparound = kcalloc(page_cnt * 2 - 1,
sizeof(struct page *),
GFP_KERNEL);
if (!pages_wraparound)
return -ENOMEM;
pages_wraparound[0] = pages;
for (i = 0; i < 2 * (page_cnt - 1); i++)
pages_wraparound[i + 1] =
&pages[i % (page_cnt - 1) + 1];
ring_info->ring_buffer = (struct hv_ring_buffer *)
vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
PAGE_KERNEL);
kfree(pages_wraparound);
if (!ring_info->ring_buffer)
return -ENOMEM;
}
if (!ring_info->ring_buffer)
return -ENOMEM;
ring_info->ring_buffer->read_index =
ring_info->ring_buffer->write_index = 0;
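The double mapping exists so that a packet wrapping past the end of the ring stays virtually contiguous. A worked example of the PFN order built above (illustrative values, not part of the diff):

	/*
	 * With page_cnt = 4 (one header page at PFN p plus three data pages),
	 * the loops above produce { p, p+1, p+2, p+3, p+1, p+2, p+3 }.
	 */
	unsigned long p = 0x1000, order[7];
	int i;

	order[0] = p;
	for (i = 0; i < 2 * (4 - 1); i++)
		order[i + 1] = p + i % (4 - 1) + 1;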


@ -1075,14 +1075,15 @@ struct netvsc_device {
/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;
u32 recv_buf_size; /* allocated bytes */
u32 recv_buf_gpadl_handle;
struct vmbus_gpadl recv_buf_gpadl_handle;
u32 recv_section_cnt;
u32 recv_section_size;
u32 recv_completion_cnt;
/* Send buffer allocated by us */
void *send_buf;
u32 send_buf_gpadl_handle;
u32 send_buf_size;
struct vmbus_gpadl send_buf_gpadl_handle;
u32 send_section_cnt;
u32 send_section_size;
unsigned long *send_section_map;


@ -278,9 +278,9 @@ static void netvsc_teardown_recv_gpadl(struct hv_device *device,
{
int ret;
if (net_device->recv_buf_gpadl_handle) {
if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
net_device->recv_buf_gpadl_handle);
&net_device->recv_buf_gpadl_handle);
/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
@ -290,7 +290,6 @@ static void netvsc_teardown_recv_gpadl(struct hv_device *device,
"unable to teardown receive buffer's gpadl\n");
return;
}
net_device->recv_buf_gpadl_handle = 0;
}
}
@ -300,9 +299,9 @@ static void netvsc_teardown_send_gpadl(struct hv_device *device,
{
int ret;
if (net_device->send_buf_gpadl_handle) {
if (net_device->send_buf_gpadl_handle.gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
net_device->send_buf_gpadl_handle);
&net_device->send_buf_gpadl_handle);
/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
@ -312,7 +311,6 @@ static void netvsc_teardown_send_gpadl(struct hv_device *device,
"unable to teardown send buffer's gpadl\n");
return;
}
net_device->send_buf_gpadl_handle = 0;
}
}
@ -380,7 +378,7 @@ static int netvsc_init_buf(struct hv_device *device,
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
init_packet->msg.v1_msg.send_recv_buf.
gpadl_handle = net_device->recv_buf_gpadl_handle;
gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
init_packet->msg.v1_msg.
send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
@ -463,6 +461,7 @@ static int netvsc_init_buf(struct hv_device *device,
ret = -ENOMEM;
goto cleanup;
}
net_device->send_buf_size = buf_size;
/* Establish the gpadl handle for this buffer on this
* channel. Note: This call uses the vmbus connection rather
@ -482,7 +481,7 @@ static int netvsc_init_buf(struct hv_device *device,
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
net_device->send_buf_gpadl_handle;
net_device->send_buf_gpadl_handle.gpadl_handle;
init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
trace_nvsp_send(ndev, init_packet);


@ -58,11 +58,11 @@ struct hv_uio_private_data {
atomic_t refcnt;
void *recv_buf;
u32 recv_gpadl;
struct vmbus_gpadl recv_gpadl;
char recv_name[32]; /* "recv_4294967295" */
void *send_buf;
u32 send_gpadl;
struct vmbus_gpadl send_gpadl;
char send_name[32];
};
@ -179,15 +179,13 @@ hv_uio_new_channel(struct vmbus_channel *new_sc)
static void
hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
{
if (pdata->send_gpadl) {
vmbus_teardown_gpadl(dev->channel, pdata->send_gpadl);
pdata->send_gpadl = 0;
if (pdata->send_gpadl.gpadl_handle) {
vmbus_teardown_gpadl(dev->channel, &pdata->send_gpadl);
vfree(pdata->send_buf);
}
if (pdata->recv_gpadl) {
vmbus_teardown_gpadl(dev->channel, pdata->recv_gpadl);
pdata->recv_gpadl = 0;
if (pdata->recv_gpadl.gpadl_handle) {
vmbus_teardown_gpadl(dev->channel, &pdata->recv_gpadl);
vfree(pdata->recv_buf);
}
}
@ -303,7 +301,7 @@ hv_uio_probe(struct hv_device *dev,
/* put Global Physical Address Label in name */
snprintf(pdata->recv_name, sizeof(pdata->recv_name),
"recv:%u", pdata->recv_gpadl);
"recv:%u", pdata->recv_gpadl.gpadl_handle);
pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
pdata->info.mem[RECV_BUF_MAP].addr
= (uintptr_t)pdata->recv_buf;
@ -324,7 +322,7 @@ hv_uio_probe(struct hv_device *dev,
}
snprintf(pdata->send_name, sizeof(pdata->send_name),
"send:%u", pdata->send_gpadl);
"send:%u", pdata->send_gpadl.gpadl_handle);
pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
pdata->info.mem[SEND_BUF_MAP].addr
= (uintptr_t)pdata->send_buf;


@ -158,6 +158,7 @@ struct ms_hyperv_tsc_page {
#define HVCALL_RETARGET_INTERRUPT 0x007e
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db
/* Extended hypercalls */
#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001


@ -35,7 +35,17 @@ struct ms_hyperv_info {
u32 max_vp_index;
u32 max_lp_index;
u32 isolation_config_a;
u32 isolation_config_b;
union {
u32 isolation_config_b;
struct {
u32 cvm_type : 4;
u32 reserved1 : 1;
u32 shared_gpa_boundary_active : 1;
u32 shared_gpa_boundary_bits : 6;
u32 reserved2 : 20;
};
};
u64 shared_gpa_boundary;
};
extern struct ms_hyperv_info ms_hyperv;
@ -44,6 +54,7 @@ extern void __percpu **hyperv_pcpu_output_arg;
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
extern bool hv_isolation_type_snp(void);
/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
@ -254,12 +265,19 @@ bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
enum hv_isolation_type hv_get_isolation_type(void);
bool hv_is_isolation_supported(void);
bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline bool hv_is_isolation_supported(void) { return false; }
static inline enum hv_isolation_type hv_get_isolation_type(void)
{
return HV_ISOLATION_TYPE_NONE;
}
#endif /* CONFIG_HYPERV */
#endif


@ -803,6 +803,12 @@ struct vmbus_device {
#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096
struct vmbus_gpadl {
u32 gpadl_handle;
u32 size;
void *buffer;
};
struct vmbus_channel {
struct list_head listentry;
@ -822,7 +828,7 @@ struct vmbus_channel {
bool rescind_ref; /* got rescind msg, got channel reference */
struct completion rescind_event;
u32 ringbuffer_gpadlhandle;
struct vmbus_gpadl ringbuffer_gpadlhandle;
/* Allocated memory for ring buffer */
struct page *ringbuffer_page;
@ -1100,19 +1106,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
void (*chn_rescind_cb)(struct vmbus_channel *));
/*
* Check if sub-channels have already been offerred. This API will be useful
* when the driver is unloaded after establishing sub-channels. In this case,
* when the driver is re-loaded, the driver would have to check if the
* subchannels have already been established before attempting to request
* the creation of sub-channels.
* This function returns TRUE to indicate that subchannels have already been
* created.
* This function should be invoked after setting the callback function for
* sub-channel creation.
*/
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
u16 type;
@ -1192,10 +1185,10 @@ extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
void *kbuffer,
u32 size,
u32 *gpadl_handle);
struct vmbus_gpadl *gpadl);
extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
u32 gpadl_handle);
struct vmbus_gpadl *gpadl);
void vmbus_reset_channel_cb(struct vmbus_channel *channel);