mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
20f07a044a
SEV and TDX both protect guest memory from host accesses. They both use guest physical address bits to communicate to the hardware which pages receive protection or not. SEV and TDX both assume that all I/O (real devices and virtio) must be performed to pages *without* protection.

To add this support, the AMD SEV code uses force_dma_unencrypted() to decrypt DMA pages when they are allocated for I/O. It also uses swiotlb_update_mem_attributes() to update decryption bits in SWIOTLB DMA buffers.

Since TDX uses a similar memory sharing design, all of the above changes can be reused. So move force_dma_unencrypted(), the SWIOTLB update code and the virtio changes out of mem_encrypt_amd.c and into mem_encrypt.c.

Introduce a new config option X86_MEM_ENCRYPT that can be selected by platforms which use x86 memory encryption features (needed by both AMD SEV and Intel TDX guest platforms).

Since the code is moved from mem_encrypt_amd.c, inherit the same make flags.

This is preparation for enabling TDX memory encryption support and has no functional changes.

Co-developed-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
Signed-off-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20211206135505.75045-4-kirill.shutemov@linux.intel.com
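For illustration, a simplified sketch of the kind of common code that ends up in the shared mem_encrypt.c: force_dma_unencrypted() uses the confidential-computing attributes to decide whether DMA buffers must be decrypted, and the x86 override of mem_encrypt_init() asks SWIOTLB to mark its bounce buffers as shared. The function and attribute names are the real kernel ones, but the bodies below are a sketch only, not the exact upstream implementation; in particular, the SEV-ES path that checks the device's DMA mask is omitted.

/*
 * Simplified sketch of the shared code this commit moves into
 * arch/x86/mm/mem_encrypt.c; not the exact upstream implementation
 * (the SEV-ES DMA-mask handling is omitted).
 */
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/cc_platform.h>
#include <linux/mem_encrypt.h>

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
	/*
	 * With encrypted guest memory (SEV, and later TDX), DMA has to target
	 * pages the host can access, i.e. pages mapped decrypted/shared.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return true;

	return false;
}

/* x86 override of the __weak mem_encrypt_init() stub in init/main.c */
void __init mem_encrypt_init(void)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return;

	/* Call into SWIOTLB to make the bounce buffers decrypted/shared */
	swiotlb_update_mem_attributes();
}

Because both helpers key off cc_platform_has() rather than SEV-specific state, the same object file can serve any x86 platform that selects X86_MEM_ENCRYPT, which is exactly why the Makefile below builds mem_encrypt.o under CONFIG_X86_MEM_ENCRYPT while the AMD-specific files stay under CONFIG_AMD_MEM_ENCRYPT.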
Makefile (62 lines, 2 KiB)
# SPDX-License-Identifier: GPL-2.0
# Kernel does not boot with instrumentation of tlb.c and mem_encrypt*.c
KCOV_INSTRUMENT_tlb.o                   := n
KCOV_INSTRUMENT_mem_encrypt.o           := n
KCOV_INSTRUMENT_mem_encrypt_amd.o       := n
KCOV_INSTRUMENT_mem_encrypt_identity.o  := n

KASAN_SANITIZE_mem_encrypt.o            := n
KASAN_SANITIZE_mem_encrypt_amd.o        := n
KASAN_SANITIZE_mem_encrypt_identity.o   := n

# Disable KCSAN entirely, because otherwise we get warnings that some functions
# reference __initdata sections.
KCSAN_SANITIZE := n

ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_mem_encrypt.o             = -pg
CFLAGS_REMOVE_mem_encrypt_amd.o         = -pg
CFLAGS_REMOVE_mem_encrypt_identity.o    = -pg
endif

obj-y                           := init.o init_$(BITS).o fault.o ioremap.o extable.o mmap.o \
                                   pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o

obj-y                           += pat/

# Make sure __phys_addr has no stackprotector
CFLAGS_physaddr.o               := -fno-stack-protector
CFLAGS_setup_nx.o               := -fno-stack-protector
CFLAGS_mem_encrypt_identity.o   := -fno-stack-protector

CFLAGS_fault.o := -I $(srctree)/$(src)/../include/asm/trace

obj-$(CONFIG_X86_32)            += pgtable_32.o iomap_32.o

obj-$(CONFIG_HUGETLB_PAGE)      += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE)       += dump_pagetables.o
obj-$(CONFIG_PTDUMP_DEBUGFS)    += debug_pagetables.o

obj-$(CONFIG_HIGHMEM)           += highmem_32.o

KASAN_SANITIZE_kasan_init_$(BITS).o := n
obj-$(CONFIG_KASAN)             += kasan_init_$(BITS).o

obj-$(CONFIG_MMIOTRACE)         += mmiotrace.o
mmiotrace-y                     := kmmio.o pf_in.o mmio-mod.o
obj-$(CONFIG_MMIOTRACE_TEST)    += testmmiotrace.o

obj-$(CONFIG_NUMA)              += numa.o numa_$(BITS).o
obj-$(CONFIG_AMD_NUMA)          += amdtopology.o
obj-$(CONFIG_ACPI_NUMA)         += srat.o
obj-$(CONFIG_NUMA_EMU)          += numa_emulation.o

obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)  += pkeys.o
obj-$(CONFIG_RANDOMIZE_MEMORY)                  += kaslr.o
obj-$(CONFIG_PAGE_TABLE_ISOLATION)              += pti.o

obj-$(CONFIG_X86_MEM_ENCRYPT)   += mem_encrypt.o
obj-$(CONFIG_AMD_MEM_ENCRYPT)   += mem_encrypt_amd.o

obj-$(CONFIG_AMD_MEM_ENCRYPT)   += mem_encrypt_identity.o
obj-$(CONFIG_AMD_MEM_ENCRYPT)   += mem_encrypt_boot.o