linux-stable/arch/powerpc/kvm/book3s_hv_tm.c
Linus Torvalds 192f0f8e9d powerpc updates for 5.3

Merge tag 'powerpc-5.3-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates from Michael Ellerman:
 "Notable changes:

   - Removal of the NPU DMA code, used by the out-of-tree Nvidia driver,
     as well as some other functions only used by drivers that haven't
     (yet?) made it upstream.

   - A fix for a bug in our handling of hardware watchpoints (eg. perf
     record -e mem: ...) which could lead to register corruption and
     kernel crashes.

   - Enable HAVE_ARCH_HUGE_VMAP, which allows us to use large pages for
     vmalloc when using the Radix MMU.

   - A large but incremental rewrite of our exception handling code to
     use gas macros rather than multiple levels of nested CPP macros.

  And the usual small fixes, cleanups and improvements.

  Thanks to: Alastair D'Silva, Alexey Kardashevskiy, Andreas Schwab,
  Aneesh Kumar K.V, Anju T Sudhakar, Anton Blanchard, Arnd Bergmann,
  Athira Rajeev, Cédric Le Goater, Christian Lamparter, Christophe
  Leroy, Christophe Lombard, Christoph Hellwig, Daniel Axtens, Denis
  Efremov, Enrico Weigelt, Frederic Barrat, Gautham R. Shenoy, Geert
  Uytterhoeven, Geliang Tang, Gen Zhang, Greg Kroah-Hartman, Greg Kurz,
  Gustavo Romero, Krzysztof Kozlowski, Madhavan Srinivasan, Masahiro
  Yamada, Mathieu Malaterre, Michael Neuling, Nathan Lynch, Naveen N.
  Rao, Nicholas Piggin, Nishad Kamdar, Oliver O'Halloran, Qian Cai, Ravi
  Bangoria, Sachin Sant, Sam Bobroff, Satheesh Rajendran, Segher
  Boessenkool, Shaokun Zhang, Shawn Anastasio, Stewart Smith, Suraj
  Jitindar Singh, Thiago Jung Bauermann, YueHaibing"

* tag 'powerpc-5.3-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (163 commits)
  powerpc/powernv/idle: Fix restore of SPRN_LDBAR for POWER9 stop state.
  powerpc/eeh: Handle hugepages in ioremap space
  ocxl: Update for AFU descriptor template version 1.1
  powerpc/boot: pass CONFIG options in a simpler and more robust way
  powerpc/boot: add {get, put}_unaligned_be32 to xz_config.h
  powerpc/irq: Don't WARN continuously in arch_local_irq_restore()
  powerpc/module64: Use symbolic instructions names.
  powerpc/module32: Use symbolic instructions names.
  powerpc: Move PPC_HA() PPC_HI() and PPC_LO() to ppc-opcode.h
  powerpc/module64: Fix comment in R_PPC64_ENTRY handling
  powerpc/boot: Add lzo support for uImage
  powerpc/boot: Add lzma support for uImage
  powerpc/boot: don't force gzipped uImage
  powerpc/8xx: Add microcode patch to move SMC parameter RAM.
  powerpc/8xx: Use IO accessors in microcode programming.
  powerpc/8xx: replace #ifdefs by IS_ENABLED() in microcode.c
  powerpc/8xx: refactor programming of microcode CPM params.
  powerpc/8xx: refactor printing of microcode patch name.
  powerpc/8xx: Refactor microcode write
  powerpc/8xx: refactor writing of CPM microcode arrays
  ...
2019-07-13 16:08:36 -07:00


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>
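
/*
 * Set up the guest's TEXASR and TFIAR to reflect a transaction failure
 * with the given cause, much as the hardware would: TFIAR records the
 * address of the failing instruction (plus the privilege state), and
 * TEXASR records the failure cause together with the abort, failure
 * summary and exact bits, and PR/SUSP as appropriate for the MSR at
 * the time of the failure.
 */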
static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
{
	u64 texasr, tfiar;
	u64 msr = vcpu->arch.shregs.msr;

	tfiar = vcpu->arch.regs.nip & ~0x3ull;
	texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
		texasr |= TEXASR_SUSP;
	if (msr & MSR_PR) {
		texasr |= TEXASR_PR;
		tfiar |= 1;
	}
	vcpu->arch.tfiar = tfiar;
	/* Preserve ROT and TL fields of existing TEXASR */
	vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
}

/*
 * This gets called on a softpatch interrupt on POWER9 DD2.2 processors.
 * We expect to find a TM-related instruction to be emulated. The
 * instruction image is in vcpu->arch.emul_inst. If the guest was in
 * TM suspended or transactional state, the checkpointed state has been
 * reclaimed and is in the vcpu struct. The CPU is in virtual mode in
 * host context.
 */
int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 msr = vcpu->arch.shregs.msr;
	u64 newmsr, bescr;
	int ra, rs;
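
	/*
	 * Match on the primary opcode together with the extended opcode
	 * bits, which is what the 0xfc0007ff mask selects; the register
	 * fields are extracted per instruction below.
	 */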
	switch (instr & 0xfc0007ff) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;
		/* should only get here for Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
		return RESUME_GUEST;

	case PPC_INST_RFEBB:
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
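		/*
		 * EBB is enabled by the hypervisor but not for the guest's
		 * problem state: deliver a facility unavailable interrupt
		 * with the FSCR interrupt cause field set to EBB.
		 */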
		if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_EBB_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		bescr = vcpu->arch.bescr;
		/* expect to see a S->T transition requested */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       ((bescr >> 30) & 3) == 2));
		bescr &= ~BESCR_GE;
		if (instr & (1 << 11))
			bescr |= BESCR_GE;
		vcpu->arch.bescr = bescr;
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = vcpu->arch.ebbrr;
		return RESUME_GUEST;

	case PPC_INST_MTMSRD:
		/* XXX do we need to check for PR=0 here? */
		rs = (instr >> 21) & 0x1f;
		newmsr = kvmppc_get_gpr(vcpu, rs);
		/* check this is a Sx -> T1 transition */
		WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
			       MSR_TM_TRANSACTIONAL(newmsr) &&
			       (newmsr & MSR_TM)));
		/* mtmsrd doesn't change LE */
		newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		return RESUME_GUEST;

	case PPC_INST_TSR:
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		/* L=1 => tresume, L=0 => tsuspend */
		if (instr & (1 << 21)) {
			if (MSR_TM_SUSPENDED(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		} else {
			if (MSR_TM_TRANSACTIONAL(msr))
				msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
		}
		vcpu->arch.shregs.msr = msr;
		return RESUME_GUEST;

	case PPC_INST_TRECLAIM:
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
		/* If no transaction active, generate TM bad thing */
		if (!MSR_TM_ACTIVE(msr)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}

		/* If failure was not previously recorded, recompute TEXASR */
		if (!(vcpu->arch.orig_texasr & TEXASR_FS)) {
			ra = (instr >> 16) & 0x1f;
			if (ra)
				ra = kvmppc_get_gpr(vcpu, ra) & 0xff;
			emulate_tx_failure(vcpu, ra);
		}
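
		/*
		 * treclaim. makes the checkpointed register state (saved in
		 * the vcpu struct when the transaction was reclaimed) the
		 * guest's current register state.
		 */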
		copy_from_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
		return RESUME_GUEST;

	case PPC_INST_TRECHKPT:
		/* XXX do we need to check for PR=0 here? */
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			kvmppc_book3s_queue_irqprio(vcpu,
						BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}

		/* If transaction active or TEXASR[FS] = 0, bad thing */
		if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			return RESUME_GUEST;
		}
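
		/*
		 * trechkpt. establishes the checkpointed register state
		 * from the current register contents, so copy the current
		 * state into the vcpu's checkpoint area.
		 */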
		copy_to_checkpoint(vcpu);

		/* Set CR0 to indicate previous transactional state */
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		vcpu->arch.shregs.msr = msr | MSR_TS_S;
		return RESUME_GUEST;
	}

	/* What should we do here? We didn't recognize the instruction */
	WARN_ON_ONCE(1);
	return RESUME_GUEST;
}