x86/compressed: Export and rename add_identity_map()

SEV-specific code will need to add some additional mappings, but doing
this within ident_map_64.c requires some SEV-specific helpers to be
exported and some SEV-specific struct definitions to be pulled into
ident_map_64.c. Instead, export add_identity_map() so SEV-specific (and
other subsystem-specific) code can be better contained outside of
ident_map_64.c.

While at it, rename the function to kernel_add_identity_map(), similar
to the kernel_ident_mapping_init() function it relies upon.

No functional changes.

Suggested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20220307213356.2797205-37-brijesh.singh@amd.com
commit a9ee679b1f
parent 5f211f4fc4
Author:     Michael Roth <michael.roth@amd.com>
AuthorDate: 2022-02-24 10:56:16 -06:00
Commit:     Borislav Petkov

 2 files changed, 10 insertions(+), 9 deletions(-)
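
For illustration only, this is the kind of out-of-ident_map_64.c caller the
export enables; the helper name sev_map_extra_range() and the mapped range
are hypothetical, not part of this commit:

	/* Hypothetical SEV-specific code, now able to live outside
	 * ident_map_64.c: */
	#include "misc.h"

	static void sev_map_extra_range(unsigned long pa, unsigned long size)
	{
		/* Add [pa, pa + size) to the identity mapping. */
		kernel_add_identity_map(pa, pa + size);
	}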

diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
--- a/arch/x86/boot/compressed/ident_map_64.c
+++ b/arch/x86/boot/compressed/ident_map_64.c

@@ -90,7 +90,7 @@ static struct x86_mapping_info mapping_info;
 /*
  * Adds the specified range to the identity mappings.
  */
-static void add_identity_map(unsigned long start, unsigned long end)
+void kernel_add_identity_map(unsigned long start, unsigned long end)
 {
 	int ret;
@@ -157,11 +157,11 @@ void initialize_identity_maps(void *rmode)
 	 * explicitly here in case the compressed kernel does not touch them,
 	 * or does not touch all the pages covering them.
 	 */
-	add_identity_map((unsigned long)_head, (unsigned long)_end);
+	kernel_add_identity_map((unsigned long)_head, (unsigned long)_end);
 	boot_params = rmode;
-	add_identity_map((unsigned long)boot_params, (unsigned long)(boot_params + 1));
+	kernel_add_identity_map((unsigned long)boot_params, (unsigned long)(boot_params + 1));
 	cmdline = get_cmd_line_ptr();
-	add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
+	kernel_add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
 
 	/* Load the new page-table. */
 	sev_verify_cbit(top_level_pgt);
@@ -246,10 +246,10 @@ static int set_clr_page_flags(struct x86_mapping_info *info,
 	 * It should already exist, but keep things generic.
 	 *
 	 * To map the page just read from it and fault it in if there is no
-	 * mapping yet. add_identity_map() can't be called here because that
-	 * would unconditionally map the address on PMD level, destroying any
-	 * PTE-level mappings that might already exist. Use assembly here so
-	 * the access won't be optimized away.
+	 * mapping yet. kernel_add_identity_map() can't be called here because
+	 * that would unconditionally map the address on PMD level, destroying
+	 * any PTE-level mappings that might already exist. Use assembly here
+	 * so the access won't be optimized away.
 	 */
 	asm volatile("mov %[address], %%r9"
 		     :: [address] "g" (*(unsigned long *)address)
@@ -363,5 +363,5 @@ void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 * Error code is sane - now identity map the 2M region around
 	 * the faulting address.
 	 */
-	add_identity_map(address, end);
+	kernel_add_identity_map(address, end);
 }
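
As an aside, the fault-in-by-read pattern the comment in the hunk above
describes looks roughly like this in full; a minimal sketch, assuming an
"r9"/"memory" clobber list (the excerpt ends at the input constraint):

	/* Read 'address' through inline asm so the compiler can't elide the
	 * access; an unmapped page then faults and the #PF handler maps it,
	 * preserving any PTE-level mappings that already exist. */
	asm volatile("mov %[address], %%r9"
		     :: [address] "g" (*(unsigned long *)address)
		     : "r9", "memory");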

diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h

@@ -156,6 +156,7 @@ static inline int count_immovable_mem_regions(void) { return 0; }
 #ifdef CONFIG_X86_5LEVEL
 extern unsigned int __pgtable_l5_enabled, pgdir_shift, ptrs_per_p4d;
 #endif
+extern void kernel_add_identity_map(unsigned long start, unsigned long end);
 
 /* Used by PAGE_KERN* macros: */
 extern pteval_t __default_kernel_pte_mask;
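
For reference, a sketch of how a caller like do_boot_page_fault() can derive
the 2M-aligned range it passes in; the helper name map_2m_around() is made up,
and the PMD_MASK/PMD_SIZE rounding is an assumption based on the "identity map
the 2M region" comment in the hunk above:

	static void map_2m_around(unsigned long address)
	{
		unsigned long end;

		address &= PMD_MASK;		/* round down to a 2M boundary */
		end = address + PMD_SIZE;	/* cover one full 2M page */
		kernel_add_identity_map(address, end);
	}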