linux-stable/arch/x86/mm/mem_encrypt_identity.c

// SPDX-License-Identifier: GPL-2.0-only
/*
* AMD Memory Encryption Support
*
* Copyright (C) 2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
#define DISABLE_BRANCH_PROFILING
/*
* Since we're dealing with identity mappings, physical and virtual
* addresses are the same, so override these defines which are ultimately
* used by the headers in misc.h.
*/
#define __pa(x) ((unsigned long)(x))
#define __va(x) ((void *)((unsigned long)(x)))
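/*
 * For example, while identity mapped, __va(0x200000) is simply
 * (void *)0x200000 and __pa() of that pointer is 0x200000 again;
 * no phys_base or page-offset fixups are involved.
 */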
/*
* Special hack: we have to be careful, because no indirections are
* allowed here, and paravirt_ops is a kind of one. As it will only run
* on bare metal anyway, we just keep it from happening. (This list needs
* to be extended when new paravirt and debugging variants are added.)
*/
#undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mem_encrypt.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cmdline.h>
#include "mm_internal.h"
#define PGD_FLAGS _KERNPG_TABLE_NOENC
#define P4D_FLAGS _KERNPG_TABLE_NOENC
#define PUD_FLAGS _KERNPG_TABLE_NOENC
#define PMD_FLAGS _KERNPG_TABLE_NOENC
#define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
#define PMD_FLAGS_DEC PMD_FLAGS_LARGE
#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
(_PAGE_PAT_LARGE | _PAGE_PWT))
#define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC)
#define PTE_FLAGS (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
#define PTE_FLAGS_DEC PTE_FLAGS
#define PTE_FLAGS_DEC_WP ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
(_PAGE_PAT | _PAGE_PWT))
#define PTE_FLAGS_ENC (PTE_FLAGS | _PAGE_ENC)
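/*
 * State used while building the temporary SME pagetables: the PGD being
 * populated, a simple bump allocator (pgtable_area) from which new
 * pagetable pages are carved, the PMD/PTE flags to apply, and the
 * physical/virtual range being mapped. A typical usage, as done by
 * sme_encrypt_kernel() below:
 *
 *	ppd.paddr     = kernel_start;
 *	ppd.vaddr     = kernel_start;
 *	ppd.vaddr_end = kernel_end;
 *	sme_map_range_encrypted(&ppd);
 */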
struct sme_populate_pgd_data {
void *pgtable_area;
pgd_t *pgd;
pmdval_t pmd_flags;
pteval_t pte_flags;
unsigned long paddr;
unsigned long vaddr;
unsigned long vaddr_end;
};
/*
* This work area lives in the .init.scratch section, which lives outside of
* the kernel proper. It is sized to hold the intermediate copy buffer and
* more than enough pagetable pages.
*
* By using this section, the kernel can be encrypted in place and it
* avoids any possibility of boot parameters or initramfs images being
* placed such that the in-place encryption logic overwrites them. This
* section is 2MB aligned to allow for simple pagetable setup using only
* PMD entries (see vmlinux.lds.S).
*/
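/*
 * PMD_PAGE_SIZE is 2MB, so this reserves 4MB: roughly a 2MB intermediate
 * copy buffer plus stack/encryption-routine pages, with the remainder
 * available for the pagetable pages created below.
 */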
static char sme_workarea[2 * PMD_PAGE_SIZE] __section(".init.scratch");
static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[] __initdata = "on";
static char sme_cmdline_off[] __initdata = "off";
static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
unsigned long pgd_start, pgd_end, pgd_size;
pgd_t *pgd_p;
pgd_start = ppd->vaddr & PGDIR_MASK;
pgd_end = ppd->vaddr_end & PGDIR_MASK;
pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
memset(pgd_p, 0, pgd_size);
}
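/*
 * Walk ppd->pgd down to the PUD level for ppd->vaddr, allocating any
 * missing P4D/PUD/PMD pages from ppd->pgtable_area. Returns the PUD
 * entry to use, or NULL if the address is already covered by a large
 * (1GB) PUD mapping.
 */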
static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = ppd->pgd + pgd_index(ppd->vaddr);
if (pgd_none(*pgd)) {
p4d = ppd->pgtable_area;
memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
}
p4d = p4d_offset(pgd, ppd->vaddr);
if (p4d_none(*p4d)) {
pud = ppd->pgtable_area;
memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
}
pud = pud_offset(p4d, ppd->vaddr);
if (pud_none(*pud)) {
pmd = ppd->pgtable_area;
memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
}
if (pud_large(*pud))
return NULL;
return pud;
}
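/*
 * Install a single 2MB PMD mapping of ppd->paddr at ppd->vaddr using
 * ppd->pmd_flags. An existing large PMD entry is left untouched.
 */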
static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
{
pud_t *pud;
pmd_t *pmd;
pud = sme_prepare_pgd(ppd);
if (!pud)
return;
pmd = pmd_offset(pud, ppd->vaddr);
if (pmd_large(*pmd))
return;
set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
}
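/*
 * Install a single 4KB PTE mapping of ppd->paddr at ppd->vaddr using
 * ppd->pte_flags, allocating a PTE page if the PMD entry is empty.
 * Existing large PMD mappings and already-present PTEs are left alone.
 */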
static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
{
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pud = sme_prepare_pgd(ppd);
if (!pud)
return;
pmd = pmd_offset(pud, ppd->vaddr);
if (pmd_none(*pmd)) {
pte = ppd->pgtable_area;
memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
}
if (pmd_large(*pmd))
return;
pte = pte_offset_map(pmd, ppd->vaddr);
if (pte_none(*pte))
set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
}
static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
{
while (ppd->vaddr < ppd->vaddr_end) {
sme_populate_pgd_large(ppd);
ppd->vaddr += PMD_PAGE_SIZE;
ppd->paddr += PMD_PAGE_SIZE;
}
}
static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
{
while (ppd->vaddr < ppd->vaddr_end) {
sme_populate_pgd(ppd);
ppd->vaddr += PAGE_SIZE;
ppd->paddr += PAGE_SIZE;
}
}
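/*
 * Map [ppd->vaddr, ppd->vaddr_end) to ppd->paddr using 2MB PMD entries
 * for the 2MB-aligned middle of the range and 4KB PTE entries for any
 * unaligned head and tail. For example, mapping 0x1ff000 - 0x601000
 * uses PTEs for 0x1ff000 - 0x200000, PMDs for 0x200000 - 0x600000 and
 * PTEs again for 0x600000 - 0x601000.
 */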
static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
pmdval_t pmd_flags, pteval_t pte_flags)
{
unsigned long vaddr_end;
ppd->pmd_flags = pmd_flags;
ppd->pte_flags = pte_flags;
/* Save original end value since we modify the struct value */
vaddr_end = ppd->vaddr_end;
/* If start is not 2MB aligned, create PTE entries */
ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
__sme_map_range_pte(ppd);
/* Create PMD entries */
ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
__sme_map_range_pmd(ppd);
/* If end is not 2MB aligned, create PTE entries */
ppd->vaddr_end = vaddr_end;
__sme_map_range_pte(ppd);
}
static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}
static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}
static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}
static unsigned long __init sme_pgtable_calc(unsigned long len)
{
unsigned long entries = 0, tables = 0;
/*
* Perform a relatively simplistic calculation of the pagetable
* entries that are needed. Those mappings will be covered mostly
* by 2MB PMD entries so we can conservatively calculate the required
* number of P4D, PUD and PMD structures needed to perform the
* mappings. For mappings that are not 2MB aligned, PTE mappings
* would be needed for the start and end portion of the address range
* that fall outside of the 2MB alignment. This results in, at most,
* two extra pages to hold PTE entries for each range that is mapped.
* Incrementing the count for each covers the case where the addresses
* cross entries.
*/
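/*
 * As a rough worked example, assuming 4-level paging (PTRS_PER_P4D == 1
 * and P4D_SIZE == PGDIR_SIZE): for len = 16MB the "entries" term below
 * is 2 PUD pages + 2 PMD pages + 2 PTE pages = 24KB, and the "tables"
 * term adds one PUD page and one PMD page to map those 24KB, giving
 * 32KB in total.
 */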
/* PGDIR_SIZE is equal to P4D_SIZE on a 4-level machine. */
if (PTRS_PER_P4D > 1)
entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;
/*
* Now calculate the added pagetable structures needed to populate
* the new pagetables.
*/
if (PTRS_PER_P4D > 1)
tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;
return entries + tables;
}
void __init sme_encrypt_kernel(struct boot_params *bp)
{
unsigned long workarea_start, workarea_end, workarea_len;
unsigned long execute_start, execute_end, execute_len;
unsigned long kernel_start, kernel_end, kernel_len;
unsigned long initrd_start, initrd_end, initrd_len;
struct sme_populate_pgd_data ppd;
unsigned long pgtable_area_len;
unsigned long decrypted_base;
if (!sme_active())
return;
/*
* Prepare for encrypting the kernel and initrd by building new
* pagetables with the necessary attributes needed to encrypt the
* kernel in place.
*
* One range of virtual addresses will map the memory occupied
* by the kernel and initrd as encrypted.
*
* Another range of virtual addresses will map the memory occupied
* by the kernel and initrd as decrypted and write-protected.
*
* Using the write-protect attribute prevents any of this
* memory from being cached.
*/
/* Physical addresses give us the identity-mapped virtual addresses */
kernel_start = __pa_symbol(_text);
kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
kernel_len = kernel_end - kernel_start;
initrd_start = 0;
initrd_end = 0;
initrd_len = 0;
#ifdef CONFIG_BLK_DEV_INITRD
initrd_len = (unsigned long)bp->hdr.ramdisk_size |
((unsigned long)bp->ext_ramdisk_size << 32);
if (initrd_len) {
initrd_start = (unsigned long)bp->hdr.ramdisk_image |
((unsigned long)bp->ext_ramdisk_image << 32);
initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
initrd_len = initrd_end - initrd_start;
}
#endif
/*
* We're running identity mapped, so we must obtain the address to the
* SME encryption workarea using rip-relative addressing.
*/
asm ("lea sme_workarea(%%rip), %0"
: "=r" (workarea_start)
: "p" (sme_workarea));
/*
* Calculate the number of workarea bytes needed:
* executable encryption area size:
* stack page (PAGE_SIZE)
* encryption routine page (PAGE_SIZE)
* intermediate copy buffer (PMD_PAGE_SIZE)
* pagetable structures for the encryption of the kernel
* pagetable structures for workarea (in case not currently mapped)
*/
execute_start = workarea_start;
execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
execute_len = execute_end - execute_start;
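/* With 4KB pages and 2MB PMD pages this is 8KB + 2MB of executable area. */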
/*
* One PGD for both encrypted and decrypted mappings and a set of
* PUDs and PMDs for each of the encrypted and decrypted mappings.
*/
pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
if (initrd_len)
pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;
/* PUDs and PMDs needed in the current pagetables for the workarea */
pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);
/*
* The total workarea includes the executable encryption area and
* the pagetable area. The start of the workarea is already 2MB
* aligned; align the end of the workarea on a 2MB boundary too, so that
* we don't try to create/allocate PTE entries from the workarea
* before it is mapped.
*/
workarea_len = execute_len + pgtable_area_len;
workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
/*
* Set the address to the start of where newly created pagetable
* structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
* structures are created when the workarea is added to the current
* pagetables and when the new encrypted and decrypted kernel
* mappings are populated.
*/
ppd.pgtable_area = (void *)execute_end;
/*
* Make sure the current pagetable structure has entries for
* addressing the workarea.
*/
ppd.pgd = (pgd_t *)native_read_cr3_pa();
ppd.paddr = workarea_start;
ppd.vaddr = workarea_start;
ppd.vaddr_end = workarea_end;
sme_map_range_decrypted(&ppd);
/* Flush the TLB - no globals so cr3 is enough */
native_write_cr3(__native_read_cr3());
/*
* A new pagetable structure is being built to allow for the kernel
* and initrd to be encrypted. It starts with an empty PGD that will
* then be populated with new PUDs and PMDs as the encrypted and
* decrypted kernel mappings are created.
*/
ppd.pgd = ppd.pgtable_area;
memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
/*
* A different PGD index/entry must be used to get different
* pagetable entries for the decrypted mapping. Choose the next
* PGD index and convert it to a virtual address to be used as
* the base of the mapping.
*/
decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
if (initrd_len) {
unsigned long check_base;
check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
decrypted_base = max(decrypted_base, check_base);
}
decrypted_base <<= PGDIR_SHIFT;
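/*
 * For example, with 4-level paging (PGDIR_SHIFT == 39) and a kernel and
 * initrd that both sit below 512GB, pgd_index() is 0 for both ranges,
 * so decrypted_base is 1 << 39 and the decrypted alias of the kernel is
 * mapped 512GB above its physical address.
 */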
/* Add encrypted kernel (identity) mappings */
ppd.paddr = kernel_start;
ppd.vaddr = kernel_start;
ppd.vaddr_end = kernel_end;
sme_map_range_encrypted(&ppd);
/* Add decrypted, write-protected kernel (non-identity) mappings */
ppd.paddr = kernel_start;
ppd.vaddr = kernel_start + decrypted_base;
ppd.vaddr_end = kernel_end + decrypted_base;
sme_map_range_decrypted_wp(&ppd);
if (initrd_len) {
/* Add encrypted initrd (identity) mappings */
ppd.paddr = initrd_start;
ppd.vaddr = initrd_start;
ppd.vaddr_end = initrd_end;
sme_map_range_encrypted(&ppd);
/*
* Add decrypted, write-protected initrd (non-identity) mappings
*/
ppd.paddr = initrd_start;
ppd.vaddr = initrd_start + decrypted_base;
ppd.vaddr_end = initrd_end + decrypted_base;
sme_map_range_decrypted_wp(&ppd);
}
/* Add decrypted workarea mappings to both kernel mappings */
ppd.paddr = workarea_start;
ppd.vaddr = workarea_start;
ppd.vaddr_end = workarea_end;
sme_map_range_decrypted(&ppd);
ppd.paddr = workarea_start;
ppd.vaddr = workarea_start + decrypted_base;
ppd.vaddr_end = workarea_end + decrypted_base;
sme_map_range_decrypted(&ppd);
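/*
 * Broadly, the assembly helper (sme_encrypt_execute() in
 * mem_encrypt_boot.S) runs from the workarea on the new pagetable: it
 * copies each chunk of the kernel/initrd out through the decrypted,
 * write-protected alias into the intermediate copy buffer and writes it
 * back through the encrypted identity mapping, encrypting the data in
 * place.
 */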
/* Perform the encryption */
sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
kernel_len, workarea_start, (unsigned long)ppd.pgd);
if (initrd_len)
sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
initrd_len, workarea_start,
(unsigned long)ppd.pgd);
/*
* At this point we are running encrypted. Remove the mappings for
* the decrypted areas - all that is needed for this is to remove
* the PGD entry/entries.
*/
ppd.vaddr = kernel_start + decrypted_base;
ppd.vaddr_end = kernel_end + decrypted_base;
sme_clear_pgd(&ppd);
if (initrd_len) {
ppd.vaddr = initrd_start + decrypted_base;
ppd.vaddr_end = initrd_end + decrypted_base;
sme_clear_pgd(&ppd);
}
ppd.vaddr = workarea_start + decrypted_base;
ppd.vaddr_end = workarea_end + decrypted_base;
sme_clear_pgd(&ppd);
/* Flush the TLB - no globals so cr3 is enough */
native_write_cr3(__native_read_cr3());
}
void __init sme_enable(struct boot_params *bp)
{
const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
unsigned int eax, ebx, ecx, edx;
unsigned long feature_mask;
bool active_by_default;
unsigned long me_mask;
char buffer[16];
u64 msr;
/* Check for the SME/SEV support leaf */
eax = 0x80000000;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
if (eax < 0x8000001f)
return;
#define AMD_SME_BIT BIT(0)
#define AMD_SEV_BIT BIT(1)
/*
* Check for the SME/SEV feature:
* CPUID Fn8000_001F[EAX]
* - Bit 0 - Secure Memory Encryption support
* - Bit 1 - Secure Encrypted Virtualization support
* CPUID Fn8000_001F[EBX]
* - Bits 5:0 - Pagetable bit position used to indicate encryption
*/
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
/* Check whether SEV or SME is supported */
if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
return;
me_mask = 1UL << (ebx & 0x3f);
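/*
 * For example, parts that report an encryption bit position of 47 in
 * CPUID Fn8000_001F[EBX] end up with me_mask == 1UL << 47.
 */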
/* Check the SEV MSR to see whether SEV or SME is enabled */
sev_status = __rdmsr(MSR_AMD64_SEV);
feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
/* Check if memory encryption is enabled */
if (feature_mask == AMD_SME_BIT) {
/*
* No SME if Hypervisor bit is set. This check is here to
* prevent a guest from trying to enable SME. For running as a
* KVM guest the MSR_AMD64_SYSCFG will be sufficient, but there
* might be other hypervisors which emulate that MSR as non-zero
* or even pass it through to the guest.
* A malicious hypervisor can still trick a guest into this
* path, but there is no way to protect against that.
*/
eax = 1;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
if (ecx & BIT(31))
return;
/* For SME, check the SYSCFG MSR */
msr = __rdmsr(MSR_AMD64_SYSCFG);
if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
return;
} else {
/* SEV state cannot be controlled by a command line option */
sme_me_mask = me_mask;
physical_mask &= ~sme_me_mask;
return;
}
/*
* Fixups have not been applied to phys_base yet and we're running
* identity mapped, so we must obtain the address to the SME command
* line argument data using rip-relative addressing.
*/
asm ("lea sme_cmdline_arg(%%rip), %0"
: "=r" (cmdline_arg)
: "p" (sme_cmdline_arg));
asm ("lea sme_cmdline_on(%%rip), %0"
: "=r" (cmdline_on)
: "p" (sme_cmdline_on));
asm ("lea sme_cmdline_off(%%rip), %0"
: "=r" (cmdline_off)
: "p" (sme_cmdline_off));
if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
active_by_default = true;
else
active_by_default = false;
cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
((u64)bp->ext_cmd_line_ptr << 32));
cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
sme_me_mask = me_mask;
else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
sme_me_mask = 0;
else
sme_me_mask = active_by_default ? me_mask : 0;
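/*
 * e.g. booting with mem_encrypt=off leaves sme_me_mask clear even when
 * CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is set, and mem_encrypt=on
 * enables SME regardless of the config default.
 */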
physical_mask &= ~sme_me_mask;
}