ARM: KVM: handle 64bit values passed to mcrr or from mrrc instructions in BE case

In some cases the mcrr and mrrc instructions, in combination with the ldrd
and strd instructions, need to deal with a 64bit value in memory. The ldrd
and strd instructions already handle endianness within word (register)
boundaries, but for the whole 64bit value to be represented correctly, the
rr_lo_hi macro is introduced and used to swap the register positions when
the mcrr and mrrc instructions are used. This has the effect of swapping
the two words.

Signed-off-by: Victor Kamensky <victor.kamensky@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Victor Kamensky authored 2014-06-12 09:30:02 -07:00; committed by Christoffer Dall
commit 19b0e60a63 (parent 64054c25cf)
4 changed files with 25 additions and 7 deletions
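
As an editorial illustration (not part of the patch), the following minimal C sketch models the word-ordering problem the commit message describes; the r2/r3 variable names simply mirror the register pair used in the diffs below. ldrd places the word at the lower address into the first register of the pair, but on a big-endian (BE8) configuration that word is the upper half of the 64bit value, while mcrr expects the lower half in the first register, hence the swap performed by rr_lo_hi.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative sketch only: model what ldrd leaves in a register pair
     * and in which order mcrr would need the two halves. */
    int main(void)
    {
            uint64_t val = 0x1122334455667788ULL;   /* 64bit value in memory */
            uint32_t word[2];
            uint32_t r2, r3;

            /* ldrd r2, r3, [addr]: r2 gets the word at the lower address,
             * r3 the word at the higher address; byte order inside each
             * word is already handled by the load itself. */
            memcpy(word, &val, sizeof(word));
            r2 = word[0];
            r3 = word[1];

            /* mcrr expects (low half, high half). On a little-endian host
             * r2 already holds the low half; on a big-endian host the pair
             * must be swapped, which is what rr_lo_hi(r2, r3) expands to
             * when CONFIG_CPU_ENDIAN_BE8 is set. */
            if (r2 == (uint32_t)val)
                    printf("r2=%08x r3=%08x -> pass r2, r3 (no swap needed)\n",
                           (unsigned)r2, (unsigned)r3);
            else
                    printf("r2=%08x r3=%08x -> pass r3, r2 (rr_lo_hi swaps the pair)\n",
                           (unsigned)r2, (unsigned)r3);
            return 0;
    }

Compiled natively, the sketch reports the swapped order on a big-endian host and the unswapped order on a little-endian one, matching what the rr_lo_hi macro selects at build time.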

@@ -61,6 +61,24 @@
 #define ARM_EXCEPTION_FIQ 6
 #define ARM_EXCEPTION_HVC 7
+/*
+ * The rr_lo_hi macro swaps a pair of registers depending on
+ * current endianness. It is used in conjunction with ldrd and strd
+ * instructions that load/store a 64-bit value from/to memory to/from
+ * a pair of registers which are used with the mrrc and mcrr instructions.
+ * If used with the ldrd/strd instructions, the a1 parameter is the first
+ * source/destination register and the a2 parameter is the second
+ * source/destination register. Note that the ldrd/strd instructions
+ * already swap the bytes within the words correctly according to the
+ * endianness setting, but the order of the registers need to be effectively
+ * swapped when used with the mrrc/mcrr instructions.
+ */
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define rr_lo_hi(a1, a2) a2, a1
+#else
+#define rr_lo_hi(a1, a2) a1, a2
+#endif
 #ifndef __ASSEMBLY__
 struct kvm;
 struct kvm_vcpu;

@@ -71,7 +71,7 @@ __do_hyp_init:
 	bne	phase2			@ Yes, second stage init
 	@ Set the HTTBR to point to the hypervisor PGD pointer passed
-	mcrr	p15, 4, r2, r3, c2
+	mcrr	p15, 4, rr_lo_hi(r2, r3), c2
 	@ Set the HTCR and VTCR to the same shareability and cacheability
 	@ settings as the non-secure TTBCR and with T0SZ == 0.
@@ -137,7 +137,7 @@ phase2:
 	mov	pc, r0
target:	@ We're now in the trampoline code, switch page tables
-	mcrr	p15, 4, r2, r3, c2
+	mcrr	p15, 4, rr_lo_hi(r2, r3), c2
 	isb
 	@ Invalidate the old TLBs

@@ -52,7 +52,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
 	dsb	ishst
 	add	r0, r0, #KVM_VTTBR
 	ldrd	r2, r3, [r0]
-	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR
+	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR
 	isb
 	mcr	p15, 0, r0, c8, c3, 0	@ TLBIALLIS (rt ignored)
 	dsb	ish
@@ -135,7 +135,7 @@ ENTRY(__kvm_vcpu_run)
 	ldr	r1, [vcpu, #VCPU_KVM]
 	add	r1, r1, #KVM_VTTBR
 	ldrd	r2, r3, [r1]
-	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR
+	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR
 	@ We're all done, just restore the GPRs and go to the guest
 	restore_guest_regs

@@ -520,7 +520,7 @@ ARM_BE8(rev	r6, r6  )
 	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
 	isb
-	mrrc	p15, 3, r2, r3, c14	@ CNTV_CVAL
+	mrrc	p15, 3, rr_lo_hi(r2, r3), c14	@ CNTV_CVAL
 	ldr	r4, =VCPU_TIMER_CNTV_CVAL
 	add	r5, vcpu, r4
 	strd	r2, r3, [r5]
@@ -560,12 +560,12 @@ ARM_BE8(rev	r6, r6  )
 	ldr	r2, [r4, #KVM_TIMER_CNTVOFF]
 	ldr	r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
-	mcrr	p15, 4, r2, r3, c14	@ CNTVOFF
+	mcrr	p15, 4, rr_lo_hi(r2, r3), c14	@ CNTVOFF
 	ldr	r4, =VCPU_TIMER_CNTV_CVAL
 	add	r5, vcpu, r4
 	ldrd	r2, r3, [r5]
-	mcrr	p15, 3, r2, r3, c14	@ CNTV_CVAL
+	mcrr	p15, 3, rr_lo_hi(r2, r3), c14	@ CNTV_CVAL
 	isb
 	ldr	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]