KVM: PPC: Introduce FPR/VR accessor functions

Introduce accessor functions for floating point and vector registers
like the ones that exist for GPRs. Use these to replace the existing FPR
and VR accessor macros.

This will be important later for Nested APIv2 support, which requires
additional functionality for accessing and modifying VCPU state.
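
As a rough standalone sketch of the pattern (illustration only, not kernel
code: the demo_* names are invented here and TS_FPROFFSET is simplified to 0):

  #include <stdio.h>
  #include <stdint.h>

  /* Standalone model of the accessor pattern (not kernel code).  The
   * storage mirrors vcpu->arch.fp.fpr[i][], where TS_FPROFFSET picks the
   * scalar FP doubleword of a VSX register (simplified to 0 here). */
  #define TS_FPROFFSET 0

  struct demo_vcpu {
          uint64_t fpr[32][2];            /* [register][doubleword] */
  };

  static inline uint64_t demo_get_fpr(struct demo_vcpu *vcpu, int i)
  {
          return vcpu->fpr[i][TS_FPROFFSET];
  }

  static inline void demo_set_fpr(struct demo_vcpu *vcpu, int i, uint64_t val)
  {
          vcpu->fpr[i][TS_FPROFFSET] = val;
  }

  int main(void)
  {
          struct demo_vcpu vcpu = { 0 };

          /* Old style: call sites poke the array (or a VCPU_FPR-like macro)
           * directly. */
          vcpu.fpr[3][TS_FPROFFSET] = 0x1234;

          /* New style: go through the accessor. */
          demo_set_fpr(&vcpu, 3, 0x1234);

          printf("FPR3 = 0x%llx\n", (unsigned long long)demo_get_fpr(&vcpu, 3));
          return 0;
  }

Call sites stop touching the register storage directly and go through the
accessor instead, which gives a single place to add the extra handling that
Nested APIv2 needs.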

Signed-off-by: Gautam Menghani <gautam@linux.ibm.com>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230914030600.16993-3-jniethe5@gmail.com
Author:    Jordan Niethe
Date:      2023-09-14 13:05:51 +10:00
Committer: Michael Ellerman
Parent:    0e85b7df9c
Commit:    52425a3b3c

5 changed files with 110 additions and 45 deletions

@@ -403,6 +403,61 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return vcpu->arch.fault_dar;
}
+static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
+{
+return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
+}
+static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
+{
+vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+}
+static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
+{
+return vcpu->arch.fp.fpscr;
+}
+static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
+{
+vcpu->arch.fp.fpscr = val;
+}
+static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
+{
+return vcpu->arch.fp.fpr[i][j];
+}
+static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j,
+u64 val)
+{
+vcpu->arch.fp.fpr[i][j] = val;
+}
+#ifdef CONFIG_ALTIVEC
+static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
+{
+*v = vcpu->arch.vr.vr[i];
+}
+static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i,
+vector128 *val)
+{
+vcpu->arch.vr.vr[i] = *val;
+}
+static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
+{
+return vcpu->arch.vr.vscr.u[3];
+}
+static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
+{
+vcpu->arch.vr.vscr.u[3] = val;
+}
+#endif
/* Expiry time of vcpu DEC relative to host TB */
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
{

@@ -89,6 +89,16 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
return vcpu->arch.regs.nip;
}
+static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
+{
+vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+}
+static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
+{
+return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
+}
#ifdef CONFIG_BOOKE
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{

@@ -636,17 +636,17 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = id - KVM_REG_PPC_FPR0;
-*val = get_reg_val(id, VCPU_FPR(vcpu, i));
+*val = get_reg_val(id, kvmppc_get_fpr(vcpu, i));
break;
case KVM_REG_PPC_FPSCR:
-*val = get_reg_val(id, vcpu->arch.fp.fpscr);
+*val = get_reg_val(id, kvmppc_get_fpscr(vcpu));
break;
#ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
i = id - KVM_REG_PPC_VSR0;
-val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
-val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
+val->vsxval[0] = kvmppc_get_vsx_fpr(vcpu, i, 0);
+val->vsxval[1] = kvmppc_get_vsx_fpr(vcpu, i, 1);
} else {
r = -ENXIO;
}
@@ -724,7 +724,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = id - KVM_REG_PPC_FPR0;
-VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
+kvmppc_set_fpr(vcpu, i, set_reg_val(id, *val));
break;
case KVM_REG_PPC_FPSCR:
vcpu->arch.fp.fpscr = set_reg_val(id, *val);
@@ -733,8 +733,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
i = id - KVM_REG_PPC_VSR0;
-vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
-vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
+kvmppc_set_vsx_fpr(vcpu, i, 0, val->vsxval[0]);
+kvmppc_set_vsx_fpr(vcpu, i, 1, val->vsxval[1]);
} else {
r = -ENXIO;
}
@@ -765,7 +765,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
#endif /* CONFIG_KVM_XIVE */
case KVM_REG_PPC_FSCR:
-vcpu->arch.fscr = set_reg_val(id, *val);
+kvmppc_set_fpscr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_TAR:
vcpu->arch.tar = set_reg_val(id, *val);

@@ -250,7 +250,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_sp64_extend = 1;
emulated = kvmppc_handle_store(vcpu,
-VCPU_FPR(vcpu, op.reg), size, 1);
+kvmppc_get_fpr(vcpu, op.reg), size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

@@ -934,11 +934,11 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
return;
if (index >= 32) {
-val.vval = VCPU_VSX_VR(vcpu, index - 32);
+kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsxval[offset] = gpr;
-VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
-VCPU_VSX_FPR(vcpu, index, offset) = gpr;
+kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
}
}
@@ -949,13 +949,13 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
if (index >= 32) {
-val.vval = VCPU_VSX_VR(vcpu, index - 32);
+kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsxval[0] = gpr;
val.vsxval[1] = gpr;
-VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
-VCPU_VSX_FPR(vcpu, index, 0) = gpr;
-VCPU_VSX_FPR(vcpu, index, 1) = gpr;
+kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
+kvmppc_set_vsx_fpr(vcpu, index, 1, gpr);
}
}
@@ -970,12 +970,12 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
val.vsx32val[1] = gpr;
val.vsx32val[2] = gpr;
val.vsx32val[3] = gpr;
-VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
val.vsx32val[0] = gpr;
val.vsx32val[1] = gpr;
-VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
-VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
+kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]);
+kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]);
}
}
@@ -991,15 +991,15 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
return;
if (index >= 32) {
-val.vval = VCPU_VSX_VR(vcpu, index - 32);
+kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsx32val[offset] = gpr32;
-VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
dword_offset = offset / 2;
word_offset = offset % 2;
-val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
+val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset);
val.vsx32val[word_offset] = gpr32;
-VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
+kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]);
}
}
#endif /* CONFIG_VSX */
@@ -1058,9 +1058,9 @@ static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
-val.vval = VCPU_VSX_VR(vcpu, index);
+kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsxval[offset] = gpr;
-VCPU_VSX_VR(vcpu, index) = val.vval;
+kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
@@ -1074,9 +1074,9 @@ static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
-val.vval = VCPU_VSX_VR(vcpu, index);
+kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx32val[offset] = gpr32;
-VCPU_VSX_VR(vcpu, index) = val.vval;
+kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
@@ -1090,9 +1090,9 @@ static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
-val.vval = VCPU_VSX_VR(vcpu, index);
+kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx16val[offset] = gpr16;
-VCPU_VSX_VR(vcpu, index) = val.vval;
+kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
@@ -1106,9 +1106,9 @@ static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
-val.vval = VCPU_VSX_VR(vcpu, index);
+kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx8val[offset] = gpr8;
-VCPU_VSX_VR(vcpu, index) = val.vval;
+kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
#endif /* CONFIG_ALTIVEC */
@@ -1194,14 +1194,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.kvm_ops->giveup_ext)
vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
-VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
break;
#ifdef CONFIG_PPC_BOOK3S
case KVM_MMIO_REG_QPR:
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
case KVM_MMIO_REG_FQPR:
-VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
#endif
@@ -1419,9 +1419,9 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
}
if (rs < 32) {
-*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
+*val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
} else {
-reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
*val = reg.vsxval[vsx_offset];
}
break;
@@ -1438,10 +1438,10 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
if (rs < 32) {
dword_offset = vsx_offset / 2;
word_offset = vsx_offset % 2;
-reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
+reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
*val = reg.vsx32val[word_offset];
} else {
-reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
*val = reg.vsx32val[vsx_offset];
}
break;
@@ -1556,7 +1556,7 @@ static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
-reg.vval = VCPU_VSX_VR(vcpu, index);
+kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsxval[vmx_offset];
return result;
@@ -1574,7 +1574,7 @@ static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
-reg.vval = VCPU_VSX_VR(vcpu, index);
+kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx32val[vmx_offset];
return result;
@@ -1592,7 +1592,7 @@ static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
-reg.vval = VCPU_VSX_VR(vcpu, index);
+kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx16val[vmx_offset];
return result;
@@ -1610,7 +1610,7 @@ static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
-reg.vval = VCPU_VSX_VR(vcpu, index);
+kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx8val[vmx_offset];
return result;
@@ -1719,14 +1719,14 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
-val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
-val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu));
break;
case KVM_REG_PPC_VRSAVE:
val = get_reg_val(reg->id, vcpu->arch.vrsave);
@@ -1770,14 +1770,14 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
-vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
-vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val));
break;
case KVM_REG_PPC_VRSAVE:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {