arm64: Rename the VHE switch to "finalise_el2"

As we are about to perform a lot more in 'mutate_to_vhe' than
we currently do, this function really becomes the point where
we finalise the basic EL2 configuration.

Reflect this into the code by renaming a bunch of things:
- HVC_VHE_RESTART -> HVC_FINALISE_EL2
- switch_to_vhe -> finalise_el2
- mutate_to_vhe -> __finalise_el2

No functional changes.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220630160500.1536744-2-maz@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
Author:    Marc Zyngier <maz@kernel.org>, 2022-06-30 17:04:52 +01:00
Committer: Will Deacon <will@kernel.org>
commit 7ddb0c3df7 (parent 0aaa68532e)
5 files changed, 22 insertions(+), 22 deletions(-)


@@ -60,12 +60,13 @@ these functions (see arch/arm{,64}/include/asm/virt.h):
* ::
- x0 = HVC_VHE_RESTART (arm64 only)
+ x0 = HVC_FINALISE_EL2 (arm64 only)
- Attempt to upgrade the kernel's exception level from EL1 to EL2 by enabling
- the VHE mode. This is conditioned by the CPU supporting VHE, the EL2 MMU
- being off, and VHE not being disabled by any other means (command line
- option, for example).
+ Finish configuring EL2 depending on the command-line options,
+ including an attempt to upgrade the kernel's exception level from
+ EL1 to EL2 by enabling the VHE mode. This is conditioned by the CPU
+ supporting VHE, the EL2 MMU being off, and VHE not being disabled by
+ any other means (command line option, for example).
Any other value of r0/x0 triggers a hypervisor-specific handling,
which is not documented here.
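For reference, the calling convention is the same as for the other stub
hypercalls: a caller at EL1 loads the function identifier into x0 and traps
to the EL2 stub vectors, as the renamed call sites later in this diff do.
A minimal sketch (the comments are mine, summarising the behaviour
described above):

    mov x0, #HVC_FINALISE_EL2   // select the stub hypercall
    hvc #0                      // trap to the EL2 stub vectors
    // returns with EL2 finalised, at EL2 if the VHE upgrade succeeded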


@@ -36,9 +36,9 @@
#define HVC_RESET_VECTORS 2
/*
- * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
+ * HVC_FINALISE_EL2 - Upgrade the CPU from EL1 to EL2, if possible
*/
- #define HVC_VHE_RESTART 3
+ #define HVC_FINALISE_EL2 3
/* Max number of HYP stub hypercalls */
#define HVC_STUB_HCALL_NR 4


@@ -459,7 +459,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
mov x0, x22 // pass FDT address in x0
bl init_feature_override // Parse cpu feature overrides
mov x0, x20
- bl switch_to_vhe // Prefer VHE if possible
+ bl finalise_el2 // Prefer VHE if possible
ldp x29, x30, [sp], #16
bl start_kernel
ASM_BUG()
@@ -542,7 +542,7 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
eret
__cpu_stick_to_vhe:
- mov x0, #HVC_VHE_RESTART
+ mov x0, #HVC_FINALISE_EL2
hvc #0
mov x0, #BOOT_CPU_MODE_EL2
ret
@@ -592,7 +592,7 @@ SYM_FUNC_START_LOCAL(secondary_startup)
* Common entry point for secondary CPUs.
*/
mov x20, x0 // preserve boot mode
- bl switch_to_vhe
+ bl finalise_el2
bl __cpu_secondary_check52bitva
#if VA_BITS > 48
ldr_l x0, vabits_actual


@@ -51,8 +51,8 @@ SYM_CODE_START_LOCAL(elx_sync)
msr vbar_el2, x1
b 9f
- 1: cmp x0, #HVC_VHE_RESTART
- b.eq mutate_to_vhe
+ 1: cmp x0, #HVC_FINALISE_EL2
+ b.eq __finalise_el2
2: cmp x0, #HVC_SOFT_RESTART
b.ne 3f
@@ -73,8 +73,8 @@ SYM_CODE_START_LOCAL(elx_sync)
eret
SYM_CODE_END(elx_sync)
- // nVHE? No way! Give me the real thing!
- SYM_CODE_START_LOCAL(mutate_to_vhe)
+ SYM_CODE_START_LOCAL(__finalise_el2)
+ // nVHE? No way! Give me the real thing!
// Sanity check: MMU *must* be off
mrs x1, sctlr_el2
tbnz x1, #0, 1f
@@ -140,10 +140,10 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
msr spsr_el1, x0
b enter_vhe
- SYM_CODE_END(mutate_to_vhe)
+ SYM_CODE_END(__finalise_el2)
// At the point where we reach enter_vhe(), we run with
- // the MMU off (which is enforced by mutate_to_vhe()).
+ // the MMU off (which is enforced by __finalise_el2()).
// We thus need to be in the idmap, or everything will
// explode when enabling the MMU.
@@ -222,11 +222,11 @@ SYM_FUNC_START(__hyp_reset_vectors)
SYM_FUNC_END(__hyp_reset_vectors)
/*
- * Entry point to switch to VHE if deemed capable
+ * Entry point to finalise EL2 and switch to VHE if deemed capable
*
* w0: boot mode, as returned by init_kernel_el()
*/
- SYM_FUNC_START(switch_to_vhe)
+ SYM_FUNC_START(finalise_el2)
// Need to have booted at EL2
cmp w0, #BOOT_CPU_MODE_EL2
b.ne 1f
@@ -236,9 +236,8 @@ SYM_FUNC_START(switch_to_vhe)
cmp x0, #CurrentEL_EL1
b.ne 1f
- // Turn the world upside down
- mov x0, #HVC_VHE_RESTART
+ mov x0, #HVC_FINALISE_EL2
hvc #0
1:
ret
- SYM_FUNC_END(switch_to_vhe)
+ SYM_FUNC_END(finalise_el2)
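For context, the head.S and cpu_resume hunks in this diff show how this
entry point is used: the boot code simply passes the mode returned by
init_kernel_el() in w0. A minimal sketch of that call sequence:

    bl init_kernel_el   // returns the boot mode in w0
    bl finalise_el2     // finalise EL2 and switch to VHE if deemed capable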


@@ -100,7 +100,7 @@ SYM_FUNC_END(__cpu_suspend_enter)
.pushsection ".idmap.text", "awx"
SYM_CODE_START(cpu_resume)
bl init_kernel_el
- bl switch_to_vhe
+ bl finalise_el2
bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */
adrp x1, swapper_pg_dir