Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Synced 2024-10-31 08:28:13 +00:00

Commit 007faec014
Hyper-V needs to issue the GHCB HV call in order to read/write MSRs in
Isolation VMs. For that, expose sev_es_ghcb_hv_call().

Hyper-V Isolation VMs are unenlightened guests and run a paravisor at
VMPL0 for communication. GHCB pages are allocated and set up by that
paravisor; Linux gets the GHCB page's physical address via
MSR_AMD64_SEV_ES_GHCB from the paravisor and should not change it.

Add a @set_ghcb_msr parameter to sev_es_ghcb_hv_call() to control
whether the function should set the GHCB's address prior to the call or
not, and export that function for use by Hyper-V.

  [ bp: - Massage commit message
        - Add a struct ghcb forward declaration to fix randconfig builds. ]

Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lore.kernel.org/r/20211025122116.264793-6-ltykernel@gmail.com
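For illustration only (a hedged sketch, not code from the kernel tree): a Hyper-V Isolation VM caller that already holds a pointer to the paravisor-provided GHCB page could use the exported sev_es_ghcb_hv_call() to read an MSR roughly as follows. The helper name example_ghcb_msr_read() is hypothetical; the sketch assumes the GHCB register accessors and SVM_EXIT_MSR from <asm/svm.h>, and it passes set_ghcb_msr as false so that MSR_AMD64_SEV_ES_GHCB, which the paravisor owns, is never rewritten.

/* Hypothetical usage sketch -- not part of the sev.h file shown below. */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <asm/sev.h>
#include <asm/svm.h>

static int example_ghcb_msr_read(struct ghcb *ghcb, u64 msr, u64 *value)
{
	struct es_em_ctxt ctxt = {};	/* filled with fault info on ES_EXCEPTION */

	/* RDMSR takes the MSR index in RCX. */
	ghcb_set_rcx(ghcb, msr);

	/*
	 * set_ghcb_msr == false: the paravisor has already programmed
	 * MSR_AMD64_SEV_ES_GHCB with the GHCB page's physical address,
	 * so the call must not overwrite it.
	 */
	if (sev_es_ghcb_hv_call(ghcb, false, &ctxt, SVM_EXIT_MSR, 0, 0) != ES_OK)
		return -EIO;

	/* RDMSR results come back split across EAX (low) and EDX (high). */
	*value = lower_32_bits(ghcb->save.rax) |
		 ((u64)lower_32_bits(ghcb->save.rdx) << 32);

	return 0;
}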
98 lines
2.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#ifndef __ASM_ENCRYPTED_STATE_H
#define __ASM_ENCRYPTED_STATE_H

#include <linux/types.h>
#include <asm/insn.h>
#include <asm/sev-common.h>

#define GHCB_PROTO_OUR		0x0001UL
#define GHCB_PROTOCOL_MAX	1ULL
#define GHCB_DEFAULT_USAGE	0ULL

#define VMGEXIT()		{ asm volatile("rep; vmmcall\n\r"); }

enum es_result {
	ES_OK,			/* All good */
	ES_UNSUPPORTED,		/* Requested operation not supported */
	ES_VMM_ERROR,		/* Unexpected state from the VMM */
	ES_DECODE_FAILED,	/* Instruction decoding failed */
	ES_EXCEPTION,		/* Instruction caused exception */
	ES_RETRY,		/* Retry instruction emulation */
};

struct es_fault_info {
	unsigned long vector;
	unsigned long error_code;
	unsigned long cr2;
};

struct pt_regs;

/* ES instruction emulation context */
struct es_em_ctxt {
	struct pt_regs *regs;
	struct insn insn;
	struct es_fault_info fi;
};

void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code);

static inline u64 lower_bits(u64 val, unsigned int bits)
{
	u64 mask = (1ULL << bits) - 1;

	return (val & mask);
}

struct real_mode_header;
enum stack_type;
struct ghcb;

/* Early IDT entry points for #VC handler */
extern void vc_no_ghcb(void);
extern void vc_boot_ghcb(void);
extern bool handle_vc_boot_ghcb(struct pt_regs *regs);

#ifdef CONFIG_AMD_MEM_ENCRYPT
extern struct static_key_false sev_es_enable_key;
extern void __sev_es_ist_enter(struct pt_regs *regs);
extern void __sev_es_ist_exit(void);
static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_ist_enter(regs);
}
static __always_inline void sev_es_ist_exit(void)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_ist_exit();
}
extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh);
extern void __sev_es_nmi_complete(void);
static __always_inline void sev_es_nmi_complete(void)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_nmi_complete();
}
extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
extern enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
					  bool set_ghcb_msr,
					  struct es_em_ctxt *ctxt,
					  u64 exit_code, u64 exit_info_1,
					  u64 exit_info_2);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
static inline void sev_es_nmi_complete(void) { }
static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
#endif

#endif