mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 00:48:50 +00:00
b81fc74d53
The guest and the hypervisor contain separate macros to get and set the GHCB MSR protocol and NAE event fields. Consolidate the GHCB protocol definitions and helper macros in one place. Leave the supported protocol version define in separate files to keep the guest and hypervisor flexibility to support different GHCB version in the same release. There is no functional change intended. Signed-off-by: Brijesh Singh <brijesh.singh@amd.com> Signed-off-by: Borislav Petkov <bp@suse.de> Acked-by: Joerg Roedel <jroedel@suse.de> Link: https://lkml.kernel.org/r/20210427111636.1207-3-brijesh.singh@amd.com
92 lines
2.4 KiB
C
92 lines
2.4 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* AMD Encrypted Register State Support
|
|
*
|
|
* Author: Joerg Roedel <jroedel@suse.de>
|
|
*/
|
|
|
|
#ifndef __ASM_ENCRYPTED_STATE_H
|
|
#define __ASM_ENCRYPTED_STATE_H
|
|
|
|
#include <linux/types.h>
|
|
#include <asm/insn.h>
|
|
#include <asm/sev-common.h>
|
|
|
|
/* GHCB MSR protocol: version this guest implements and negotiates */
#define GHCB_PROTO_OUR 0x0001UL
/* Highest GHCB protocol version supported (see also GHCB_PROTO_OUR) */
#define GHCB_PROTOCOL_MAX 1ULL
/* Default value for the GHCB usage field (standard usage) */
#define GHCB_DEFAULT_USAGE 0ULL

/*
 * Exit to the hypervisor: "rep; vmmcall" is the instruction encoding of
 * VMGEXIT. NOTE(review): the trailing "\n\r" is historic and harmless to
 * the emitted code — it only affects the assembler text.
 */
#define VMGEXIT() { asm volatile("rep; vmmcall\n\r"); }
|
/*
 * Result codes returned by the #VC exception handling / instruction
 * emulation paths. Callers branch on these to decide whether to retry,
 * inject an exception, or fail the emulation.
 */
enum es_result {
	ES_OK,			/* All good */
	ES_UNSUPPORTED,		/* Requested operation not supported */
	ES_VMM_ERROR,		/* Unexpected state from the VMM */
	ES_DECODE_FAILED,	/* Instruction decoding failed */
	ES_EXCEPTION,		/* Instruction caused exception */
	ES_RETRY,		/* Retry instruction emulation */
};
|
|
|
/*
 * Fault information recorded when instruction emulation raises an
 * exception, so the caller can forward/inject it.
 */
struct es_fault_info {
	unsigned long vector;		/* exception vector number */
	unsigned long error_code;	/* hardware error code for the exception */
	unsigned long cr2;		/* faulting address (page-fault case) */
};
|
|
|
struct pt_regs;
|
|
|
|
/* ES instruction emulation context */
struct es_em_ctxt {
	struct pt_regs *regs;		/* guest register state at the #VC */
	struct insn insn;		/* decoded faulting instruction */
	struct es_fault_info fi;	/* filled in when emulation faults */
};
|
|
|
void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code);
|
|
|
|
/*
 * Return the lowest @bits bits of @val.
 *
 * Guard against @bits >= 64: shifting a 64-bit value by its full width
 * (or more) is undefined behavior in C, so the full value is returned
 * in that case instead of computing an out-of-range mask.
 */
static inline u64 lower_bits(u64 val, unsigned int bits)
{
	u64 mask;

	if (bits >= 64)
		return val;

	mask = (1ULL << bits) - 1;

	return val & mask;
}
|
|
|
struct real_mode_header;
|
|
enum stack_type;
|
|
|
|
/* Early IDT entry points for #VC handler */
|
|
extern void vc_no_ghcb(void);
|
|
extern void vc_boot_ghcb(void);
|
|
extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
|
|
|
|
#ifdef CONFIG_AMD_MEM_ENCRYPT
|
|
extern struct static_key_false sev_es_enable_key;
|
|
extern void __sev_es_ist_enter(struct pt_regs *regs);
|
|
extern void __sev_es_ist_exit(void);
|
|
static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
|
|
{
|
|
if (static_branch_unlikely(&sev_es_enable_key))
|
|
__sev_es_ist_enter(regs);
|
|
}
|
|
static __always_inline void sev_es_ist_exit(void)
|
|
{
|
|
if (static_branch_unlikely(&sev_es_enable_key))
|
|
__sev_es_ist_exit();
|
|
}
|
|
extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh);
|
|
extern void __sev_es_nmi_complete(void);
|
|
static __always_inline void sev_es_nmi_complete(void)
|
|
{
|
|
if (static_branch_unlikely(&sev_es_enable_key))
|
|
__sev_es_nmi_complete();
|
|
}
|
|
extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
|
|
#else
|
|
/*
 * Stubs for !CONFIG_AMD_MEM_ENCRYPT builds: SEV-ES support is compiled
 * out, so these do nothing and report success where a return value is
 * expected.
 */
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
static inline void sev_es_nmi_complete(void) { }
static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
|
#endif
|
|
|
|
#endif
|