/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2009 Freescale Semiconductor, Inc.
 *
 * provides masks and opcode images for use by code generation, emulation
 * and for instructions that older assemblers might not know about
 */
/*
 * Include guard for the opcode definitions below.  Version-control residue
 * (commit dates/messages) was interleaved with the directives and has been
 * removed; the directives themselves are unchanged.
 */
#ifndef _ASM_POWERPC_PPC_OPCODE_H
#define _ASM_POWERPC_PPC_OPCODE_H

#include <asm/asm-const.h>
/*
 * Register-name to register-number mappings, R0..R31 -> 0..31.
 * (Residue from version control was interleaved here; values unchanged.)
 */
#define __REG_R0 0
#define __REG_R1 1
#define __REG_R2 2
#define __REG_R3 3
#define __REG_R4 4
#define __REG_R5 5
#define __REG_R6 6
#define __REG_R7 7
#define __REG_R8 8
#define __REG_R9 9
#define __REG_R10 10
#define __REG_R11 11
#define __REG_R12 12
#define __REG_R13 13
#define __REG_R14 14
#define __REG_R15 15
#define __REG_R16 16
#define __REG_R17 17
#define __REG_R18 18
#define __REG_R19 19
#define __REG_R20 20
#define __REG_R21 21
#define __REG_R22 22
#define __REG_R23 23
#define __REG_R24 24
#define __REG_R25 25
#define __REG_R26 26
#define __REG_R27 27
#define __REG_R28 28
#define __REG_R29 29
#define __REG_R30 30
#define __REG_R31 31
/*
 * RA-operand variants of the register-number macros.  Note the zero case is
 * spelled __REGA0_0 (not __REGA0_R0) — presumably so that "r0 in an RA slot
 * means literal zero" is spelled out at the use site; TODO confirm against
 * the macros that consume these.
 */
#define __REGA0_0 0
#define __REGA0_R1 1
#define __REGA0_R2 2
#define __REGA0_R3 3
#define __REGA0_R4 4
#define __REGA0_R5 5
#define __REGA0_R6 6
#define __REGA0_R7 7
#define __REGA0_R8 8
#define __REGA0_R9 9
#define __REGA0_R10 10
#define __REGA0_R11 11
#define __REGA0_R12 12
#define __REGA0_R13 13
#define __REGA0_R14 14
#define __REGA0_R15 15
#define __REGA0_R16 16
#define __REGA0_R17 17
#define __REGA0_R18 18
#define __REGA0_R19 19
#define __REGA0_R20 20
#define __REGA0_R21 21
#define __REGA0_R22 22
#define __REGA0_R23 23
#define __REGA0_R24 24
#define __REGA0_R25 25
#define __REGA0_R26 26
#define __REGA0_R27 27
#define __REGA0_R28 28
#define __REGA0_R29 29
#define __REGA0_R30 30
#define __REGA0_R31 31
/* For use with PPC_RAW_() macros */
#define _R0 0
#define _R1 1
#define _R2 2
#define _R3 3
#define _R4 4
#define _R5 5
#define _R6 6
#define _R7 7
#define _R8 8
#define _R9 9
#define _R10 10
#define _R11 11
#define _R12 12
#define _R13 13
#define _R14 14
#define _R15 15
#define _R16 16
#define _R17 17
#define _R18 18
#define _R19 19
#define _R20 20
#define _R21 21
#define _R22 22
#define _R23 23
#define _R24 24
#define _R25 25
#define _R26 26
#define _R27 27
#define _R28 28
#define _R29 29
#define _R30 30
#define _R31 31
/*
 * Immediate-field extraction helpers.
 * IMM_L():  low 16 bits of the operand.
 * IMM_DS(): low 16 bits with the bottom two bits cleared (DS-form aligned).
 * IMM_DQ(): low 16 bits with the bottom four bits cleared (DQ-form aligned).
 * IMM_D0()/IMM_D1(): split an operand into bits 16..33 and bits 0..15 —
 * presumably the two halves of a prefixed-instruction 34-bit displacement;
 * TODO confirm against the users of these macros.
 */
#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
#define IMM_DS(i) ((uintptr_t)(i) & 0xfffc)
#define IMM_DQ(i) ((uintptr_t)(i) & 0xfff0)
#define IMM_D0(i) (((uintptr_t)(i) >> 16) & 0x3ffff)
#define IMM_D1(i) IMM_L(i)
/*
 * 16-bit immediate helper macros: HA() is for use with sign-extending instrs
 * (e.g. LD, ADDI). If the bottom 16 bits is "-ve", add another bit into the
 * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000).
 */
#define IMM_H(i) ((uintptr_t)(i)>>16)
#define IMM_HA(i) (((uintptr_t)(i)>>16) + \
			(((uintptr_t)(i) & 0x8000) >> 15))
/* opcode and xopcode for instructions */
#define OP_PREFIX 1
#define OP_TRAP_64 2
#define OP_TRAP 3
#define OP_SC 17
#define OP_19 19
#define OP_31 31
#define OP_LWZ 32
#define OP_LWZU 33
#define OP_LBZ 34
#define OP_LBZU 35
#define OP_STW 36
#define OP_STWU 37
#define OP_STB 38
#define OP_STBU 39
#define OP_LHZ 40
#define OP_LHZU 41
#define OP_LHA 42
#define OP_LHAU 43
#define OP_STH 44
#define OP_STHU 45
#define OP_LMW 46
#define OP_STMW 47
#define OP_LFS 48
#define OP_LFSU 49
#define OP_LFD 50
#define OP_LFDU 51
#define OP_STFS 52
#define OP_STFSU 53
#define OP_STFD 54
#define OP_STFDU 55
#define OP_LQ 56
#define OP_LD 58
#define OP_STD 62
/* Extended opcodes within primary opcode 19 (interrupt-return and
 * power-saving instructions). */
#define OP_19_XOP_RFID 18
#define OP_19_XOP_RFMCI 38
#define OP_19_XOP_RFDI 39
#define OP_19_XOP_RFI 50
#define OP_19_XOP_RFCI 51
#define OP_19_XOP_RFSCV 82
#define OP_19_XOP_HRFID 274
#define OP_19_XOP_URFID 306
#define OP_19_XOP_STOP 370
#define OP_19_XOP_DOZE 402
#define OP_19_XOP_NAP 434
#define OP_19_XOP_SLEEP 466
#define OP_19_XOP_RVWINKLE 498
/* Extended opcodes within primary opcode 31. */
#define OP_31_XOP_TRAP 4
#define OP_31_XOP_LDX 21
#define OP_31_XOP_LWZX 23
#define OP_31_XOP_LDUX 53
#define OP_31_XOP_DCBST 54
#define OP_31_XOP_LWZUX 55
#define OP_31_XOP_TRAP_64 68
#define OP_31_XOP_DCBF 86
#define OP_31_XOP_LBZX 87
#define OP_31_XOP_STDX 149
#define OP_31_XOP_STWX 151
#define OP_31_XOP_STDUX 181
#define OP_31_XOP_STWUX 183
#define OP_31_XOP_STBX 215
#define OP_31_XOP_LBZUX 119
#define OP_31_XOP_STBUX 247
#define OP_31_XOP_LHZX 279
#define OP_31_XOP_LHZUX 311
#define OP_31_XOP_MSGSNDP 142
#define OP_31_XOP_MSGCLRP 174
#define OP_31_XOP_MTMSR 146
#define OP_31_XOP_MTMSRD 178
#define OP_31_XOP_TLBIE 306
#define OP_31_XOP_MFSPR 339
#define OP_31_XOP_LWAX 341
#define OP_31_XOP_LHAX 343
#define OP_31_XOP_LWAUX 373
#define OP_31_XOP_LHAUX 375
#define OP_31_XOP_STHX 407
#define OP_31_XOP_STHUX 439
#define OP_31_XOP_MTSPR 467
#define OP_31_XOP_DCBI 470
#define OP_31_XOP_LDBRX 532
#define OP_31_XOP_LWBRX 534
#define OP_31_XOP_TLBSYNC 566
#define OP_31_XOP_STDBRX 660
#define OP_31_XOP_STWBRX 662
#define OP_31_XOP_STFSX 663
#define OP_31_XOP_STFSUX 695
#define OP_31_XOP_STFDX 727
#define OP_31_XOP_STFDUX 759
#define OP_31_XOP_LHBRX 790
#define OP_31_XOP_LFIWAX 855
#define OP_31_XOP_LFIWZX 887
#define OP_31_XOP_STHBRX 918
#define OP_31_XOP_STFIWX 983

/* VSX Scalar Load Instructions */
#define OP_31_XOP_LXSDX 588
#define OP_31_XOP_LXSSPX 524
#define OP_31_XOP_LXSIWAX 76
#define OP_31_XOP_LXSIWZX 12

/* VSX Scalar Store Instructions */
#define OP_31_XOP_STXSDX 716
#define OP_31_XOP_STXSSPX 652
#define OP_31_XOP_STXSIWX 140

/* VSX Vector Load Instructions */
#define OP_31_XOP_LXVD2X 844
#define OP_31_XOP_LXVW4X 780

/* VSX Vector Load and Splat Instruction */
#define OP_31_XOP_LXVDSX 332

/* VSX Vector Store Instructions */
#define OP_31_XOP_STXVD2X 972
#define OP_31_XOP_STXVW4X 908

#define OP_31_XOP_LFSX 535
#define OP_31_XOP_LFSUX 567
#define OP_31_XOP_LFDX 599
#define OP_31_XOP_LFDUX 631

/* VMX Vector Load Instructions */
#define OP_31_XOP_LVX 103

/* VMX Vector Store Instructions */
#define OP_31_XOP_STVX 231
/* sorted alphabetically */
#define PPC_INST_BCCTR_FLUSH 0x4c400420
#define PPC_INST_COPY 0x7c20060c
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
#define PPC_INST_DSSALL 0x7e00066c
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LSWI 0x7c0004aa
#define PPC_INST_LSWX 0x7c00042a
#define PPC_INST_LWSYNC 0x7c2004ac
#define PPC_INST_SYNC 0x7c0004ac
#define PPC_INST_SYNC_MASK 0xfc0007fe
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
#define PPC_INST_MTMSRD 0x7c000164
#define PPC_INST_PASTE 0x7c20070d
#define PPC_INST_PASTE_MASK 0xfc2007ff
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
KVM: PPC: Book3S HV: Work around transactional memory bugs in POWER9
POWER9 has hardware bugs relating to transactional memory and thread
reconfiguration (changes to hardware SMT mode). Specifically, the core
does not have enough storage to store a complete checkpoint of all the
architected state for all four threads. The DD2.2 version of POWER9
includes hardware modifications designed to allow hypervisor software
to implement workarounds for these problems. This patch implements
those workarounds in KVM code so that KVM guests see a full, working
transactional memory implementation.
The problems center around the use of TM suspended state, where the
CPU has a checkpointed state but execution is not transactional. The
workaround is to implement a "fake suspend" state, which looks to the
guest like suspended state but the CPU does not store a checkpoint.
In this state, any instruction that would cause a transition to
transactional state (rfid, rfebb, mtmsrd, tresume) or would use the
checkpointed state (treclaim) causes a "soft patch" interrupt (vector
0x1500) to the hypervisor so that it can be emulated. The trechkpt
instruction also causes a soft patch interrupt.
On POWER9 DD2.2, we avoid returning to the guest in any state which
would require a checkpoint to be present. The trechkpt in the guest
entry path which would normally create that checkpoint is replaced by
either a transition to fake suspend state, if the guest is in suspend
state, or a rollback to the pre-transactional state if the guest is in
transactional state. Fake suspend state is indicated by a flag in the
PACA plus a new bit in the PSSCR. The new PSSCR bit is write-only and
reads back as 0.
On exit from the guest, if the guest is in fake suspend state, we still
do the treclaim instruction as we would in real suspend state, in order
to get into non-transactional state, but we do not save the resulting
register state since there was no checkpoint.
Emulation of the instructions that cause a softpatch interrupt is
handled in two paths. If the guest is in real suspend mode, we call
kvmhv_p9_tm_emulation_early() to handle the cases where the guest is
transitioning to transactional state. This is called before we do the
treclaim in the guest exit path; because we haven't done treclaim, we
can get back to the guest with the transaction still active. If the
instruction is a case that kvmhv_p9_tm_emulation_early() doesn't
handle, or if the guest is in fake suspend state, then we proceed to
do the complete guest exit path and subsequently call
kvmhv_p9_tm_emulation() in host context with the MMU on. This handles
all the cases including the cases that generate program interrupts
(illegal instruction or TM Bad Thing) and facility unavailable
interrupts.
The emulation is reasonably straightforward and is mostly concerned
with checking for exception conditions and updating the state of
registers such as MSR and CR0. The treclaim emulation takes care to
ensure that the TEXASR register gets updated as if it were the guest
treclaim instruction that had done failure recording, not the treclaim
done in hypervisor state in the guest exit path.
With this, the KVM_CAP_PPC_HTM capability returns true (1) even if
transactional memory is not available to host userspace.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2018-03-21 10:32:01 +00:00
|
|
|
#define PPC_INST_RFEBB 0x4c000124
|
|
|
|
#define PPC_INST_RFID 0x4c000024
|
2011-03-02 15:18:48 +00:00
|
|
|
#define PPC_INST_MFSPR_DSCR 0x7c1102a6
|
2017-01-19 03:19:10 +00:00
|
|
|
#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe
|
2011-03-02 15:18:48 +00:00
|
|
|
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
|
2017-01-19 03:19:10 +00:00
|
|
|
#define PPC_INST_MTSPR_DSCR_MASK 0xfc1ffffe
|
2013-05-01 20:06:33 +00:00
|
|
|
#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
|
2017-01-19 03:19:10 +00:00
|
|
|
#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
|
2013-05-01 20:06:33 +00:00
|
|
|
#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
|
2017-01-19 03:19:10 +00:00
|
|
|
#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
|
2009-02-10 20:10:44 +00:00
|
|
|
#define PPC_INST_STRING 0x7c00042a
|
|
|
|
#define PPC_INST_STRING_MASK 0xfc0007fe
|
|
|
|
#define PPC_INST_STRING_GEN_MASK 0xfc00067e
|
2021-05-11 12:18:33 +00:00
|
|
|
#define PPC_INST_SETB 0x7c000100
|
2009-02-10 20:10:44 +00:00
|
|
|
#define PPC_INST_STSWI 0x7c0005aa
|
|
|
|
#define PPC_INST_STSWX 0x7c00052a
|
2013-02-13 16:21:30 +00:00
|
|
|
#define PPC_INST_TRECHKPT 0x7c0007dd
|
|
|
|
#define PPC_INST_TRECLAIM 0x7c00075d
|
KVM: PPC: Book3S HV: Work around transactional memory bugs in POWER9
POWER9 has hardware bugs relating to transactional memory and thread
reconfiguration (changes to hardware SMT mode). Specifically, the core
does not have enough storage to store a complete checkpoint of all the
architected state for all four threads. The DD2.2 version of POWER9
includes hardware modifications designed to allow hypervisor software
to implement workarounds for these problems. This patch implements
those workarounds in KVM code so that KVM guests see a full, working
transactional memory implementation.
The problems center around the use of TM suspended state, where the
CPU has a checkpointed state but execution is not transactional. The
workaround is to implement a "fake suspend" state, which looks to the
guest like suspended state but the CPU does not store a checkpoint.
In this state, any instruction that would cause a transition to
transactional state (rfid, rfebb, mtmsrd, tresume) or would use the
checkpointed state (treclaim) causes a "soft patch" interrupt (vector
0x1500) to the hypervisor so that it can be emulated. The trechkpt
instruction also causes a soft patch interrupt.
On POWER9 DD2.2, we avoid returning to the guest in any state which
would require a checkpoint to be present. The trechkpt in the guest
entry path which would normally create that checkpoint is replaced by
either a transition to fake suspend state, if the guest is in suspend
state, or a rollback to the pre-transactional state if the guest is in
transactional state. Fake suspend state is indicated by a flag in the
PACA plus a new bit in the PSSCR. The new PSSCR bit is write-only and
reads back as 0.
On exit from the guest, if the guest is in fake suspend state, we still
do the treclaim instruction as we would in real suspend state, in order
to get into non-transactional state, but we do not save the resulting
register state since there was no checkpoint.
Emulation of the instructions that cause a softpatch interrupt is
handled in two paths. If the guest is in real suspend mode, we call
kvmhv_p9_tm_emulation_early() to handle the cases where the guest is
transitioning to transactional state. This is called before we do the
treclaim in the guest exit path; because we haven't done treclaim, we
can get back to the guest with the transaction still active. If the
instruction is a case that kvmhv_p9_tm_emulation_early() doesn't
handle, or if the guest is in fake suspend state, then we proceed to
do the complete guest exit path and subsequently call
kvmhv_p9_tm_emulation() in host context with the MMU on. This handles
all the cases including the cases that generate program interrupts
(illegal instruction or TM Bad Thing) and facility unavailable
interrupts.
The emulation is reasonably straightforward and is mostly concerned
with checking for exception conditions and updating the state of
registers such as MSR and CR0. The treclaim emulation takes care to
ensure that the TEXASR register gets updated as if it were the guest
treclaim instruction that had done failure recording, not the treclaim
done in hypervisor state in the guest exit path.
With this, the KVM_CAP_PPC_HTM capability returns true (1) even if
transactional memory is not available to host userspace.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2018-03-21 10:32:01 +00:00
|
|
|
#define PPC_INST_TSR 0x7c0005dd
|
2011-07-20 15:51:00 +00:00
|
|
|
#define PPC_INST_BRANCH_COND 0x40800000
|
|
|
|
|
2020-05-25 02:59:19 +00:00
|
|
|
/* Prefixes */
|
2020-05-25 02:59:20 +00:00
|
|
|
#define PPC_INST_LFS 0xc0000000
|
|
|
|
#define PPC_INST_STFS 0xd0000000
|
|
|
|
#define PPC_INST_LFD 0xc8000000
|
|
|
|
#define PPC_INST_STFD 0xd8000000
|
2020-05-25 02:59:19 +00:00
|
|
|
#define PPC_PREFIX_MLS 0x06000000
|
|
|
|
#define PPC_PREFIX_8LS 0x04000000
|
|
|
|
|
|
|
|
/* Prefixed instructions */
|
|
|
|
#define PPC_INST_PLD 0xe4000000
|
|
|
|
#define PPC_INST_PSTD 0xf4000000
|
|
|
|
|
2009-02-10 20:10:44 +00:00
|
|
|
/* macros to insert fields into opcodes */
|
2012-06-25 13:33:20 +00:00
|
|
|
#define ___PPC_RA(a) (((a) & 0x1f) << 16)
|
|
|
|
#define ___PPC_RB(b) (((b) & 0x1f) << 11)
|
2019-02-22 06:53:27 +00:00
|
|
|
#define ___PPC_RC(c) (((c) & 0x1f) << 6)
|
2012-06-25 13:33:20 +00:00
|
|
|
#define ___PPC_RS(s) (((s) & 0x1f) << 21)
|
|
|
|
#define ___PPC_RT(t) ___PPC_RS(t)
|
2016-07-13 09:35:20 +00:00
|
|
|
#define ___PPC_R(r) (((r) & 0x1) << 16)
|
|
|
|
#define ___PPC_PRS(prs) (((prs) & 0x1) << 17)
|
|
|
|
#define ___PPC_RIC(ric) (((ric) & 0x3) << 18)
|
2012-06-25 13:33:23 +00:00
|
|
|
#define __PPC_RA(a) ___PPC_RA(__REG_##a)
|
2012-06-25 13:33:24 +00:00
|
|
|
#define __PPC_RA0(a) ___PPC_RA(__REGA0_##a)
|
2012-06-25 13:33:23 +00:00
|
|
|
#define __PPC_RB(b) ___PPC_RB(__REG_##b)
|
|
|
|
#define __PPC_RS(s) ___PPC_RS(__REG_##s)
|
|
|
|
#define __PPC_RT(t) ___PPC_RT(__REG_##t)
|
powerpc: Emulate most Book I instructions in emulate_step()
This extends the emulate_step() function to handle a large proportion
of the Book I instructions implemented on current 64-bit server
processors. The aim is to handle all the load and store instructions
used in the kernel, plus all of the instructions that appear between
l[wd]arx and st[wd]cx., so this handles the Altivec/VMX lvx and stvx
and the VSX lxv2dx and stxv2dx instructions (implemented in POWER7).
The new code can emulate user mode instructions, and checks the
effective address for a load or store if the saved state is for
user mode. It doesn't handle little-endian mode at present.
For floating-point, Altivec/VMX and VSX instructions, it checks
that the saved MSR has the enable bit for the relevant facility
set, and if so, assumes that the FP/VMX/VSX registers contain
valid state, and does loads or stores directly to/from the
FP/VMX/VSX registers, using assembly helpers in ldstfp.S.
Instructions supported now include:
* Loads and stores, including some but not all VMX and VSX instructions,
and lmw/stmw
* Atomic loads and stores (l[dw]arx, st[dw]cx.)
* Arithmetic instructions (add, subtract, multiply, divide, etc.)
* Compare instructions
* Rotate and mask instructions
* Shift instructions
* Logical instructions (and, or, xor, etc.)
* Condition register logical instructions
* mtcrf, cntlz[wd], exts[bhw]
* isync, sync, lwsync, ptesync, eieio
* Cache operations (dcbf, dcbst, dcbt, dcbtst)
The overflow-checking arithmetic instructions are not included, but
they appear not to be ever used in C code.
This uses decimal values for the minor opcodes in the switch statements
because that is what appears in the Power ISA specification, thus it is
easier to check that they are correct if they are in decimal.
If this is used to single-step an instruction where a data breakpoint
interrupt occurred, then there is the possibility that the instruction
is a lwarx or ldarx. In that case we have to be careful not to lose the
reservation until we get to the matching st[wd]cx., or we'll never make
forward progress. One alternative is to try to arrange that we can
return from interrupts and handle data breakpoint interrupts without
losing the reservation, which means not using any spinlocks, mutexes,
or atomic ops (including bitops). That seems rather fragile. The
other alternative is to emulate the larx/stcx and all the instructions
in between. This is why this commit adds support for a wide range
of integer instructions.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2010-06-15 04:48:58 +00:00
|
|
|
#define __PPC_XA(a) ((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3))
|
|
|
|
#define __PPC_XB(b) ((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4))
|
2009-04-29 20:58:01 +00:00
|
|
|
#define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
|
powerpc: Emulate most Book I instructions in emulate_step()
This extends the emulate_step() function to handle a large proportion
of the Book I instructions implemented on current 64-bit server
processors. The aim is to handle all the load and store instructions
used in the kernel, plus all of the instructions that appear between
l[wd]arx and st[wd]cx., so this handles the Altivec/VMX lvx and stvx
and the VSX lxv2dx and stxv2dx instructions (implemented in POWER7).
The new code can emulate user mode instructions, and checks the
effective address for a load or store if the saved state is for
user mode. It doesn't handle little-endian mode at present.
For floating-point, Altivec/VMX and VSX instructions, it checks
that the saved MSR has the enable bit for the relevant facility
set, and if so, assumes that the FP/VMX/VSX registers contain
valid state, and does loads or stores directly to/from the
FP/VMX/VSX registers, using assembly helpers in ldstfp.S.
Instructions supported now include:
* Loads and stores, including some but not all VMX and VSX instructions,
and lmw/stmw
* Atomic loads and stores (l[dw]arx, st[dw]cx.)
* Arithmetic instructions (add, subtract, multiply, divide, etc.)
* Compare instructions
* Rotate and mask instructions
* Shift instructions
* Logical instructions (and, or, xor, etc.)
* Condition register logical instructions
* mtcrf, cntlz[wd], exts[bhw]
* isync, sync, lwsync, ptesync, eieio
* Cache operations (dcbf, dcbst, dcbt, dcbtst)
The overflow-checking arithmetic instructions are not included, but
they appear not to be ever used in C code.
This uses decimal values for the minor opcodes in the switch statements
because that is what appears in the Power ISA specification, thus it is
easier to check that they are correct if they are in decimal.
If this is used to single-step an instruction where a data breakpoint
interrupt occurred, then there is the possibility that the instruction
is a lwarx or ldarx. In that case we have to be careful not to lose the
reservation until we get to the matching st[wd]cx., or we'll never make
forward progress. One alternative is to try to arrange that we can
return from interrupts and handle data breakpoint interrupts without
losing the reservation, which means not using any spinlocks, mutexes,
or atomic ops (including bitops). That seems rather fragile. The
other alternative is to emulate the larx/stcx and all the instructions
in between. This is why this commit adds support for a wide range
of integer instructions.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2010-06-15 04:48:58 +00:00
|
|
|
#define __PPC_XT(s) __PPC_XS(s)
|
2020-10-11 05:09:07 +00:00
|
|
|
#define __PPC_XSP(s) ((((s) & 0x1e) | (((s) >> 5) & 0x1)) << 21)
|
|
|
|
#define __PPC_XTP(s) __PPC_XSP(s)
|
2009-04-29 20:58:01 +00:00
|
|
|
#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
|
|
|
|
#define __PPC_WC(w) (((w) & 0x3) << 21)
|
2011-04-14 22:31:56 +00:00
|
|
|
#define __PPC_WS(w) (((w) & 0x1f) << 11)
|
2011-07-20 15:51:00 +00:00
|
|
|
#define __PPC_SH(s) __PPC_WS(s)
|
2017-02-08 08:57:29 +00:00
|
|
|
/* 6-bit shift amount for 64-bit shift/rotate forms: low 5 bits go through
 * __PPC_SH, and bit 5 (0x20) is placed separately (shifted down by 4). */
#define __PPC_SH64(s) (__PPC_SH(s) | (((s) & 0x20) >> 4))
|
2019-02-22 06:53:27 +00:00
|
|
|
#define __PPC_MB(s) ___PPC_RC(s)
|
2011-07-20 15:51:00 +00:00
|
|
|
#define __PPC_ME(s) (((s) & 0x1f) << 1)
|
2016-06-22 16:25:04 +00:00
|
|
|
#define __PPC_MB64(s) (__PPC_MB(s) | ((s) & 0x20))
|
|
|
|
#define __PPC_ME64(s) __PPC_MB64(s)
|
2011-07-20 15:51:00 +00:00
|
|
|
#define __PPC_BI(s) (((s) & 0x1f) << 16)
|
2012-10-02 15:52:19 +00:00
|
|
|
#define __PPC_CT(t) (((t) & 0x0f) << 21)
|
2018-01-12 12:45:23 +00:00
|
|
|
/* Encode a 10-bit SPR number: the two 5-bit halves are swapped in the
 * instruction image (low half at bit 16, high half at bit 11). */
#define __PPC_SPR(r) ((((r) & 0x1f) << 16) | ((((r) >> 5) & 0x1f) << 11))
|
2018-06-07 01:57:52 +00:00
|
|
|
#define __PPC_RC21 (0x1 << 10)
|
2020-05-25 02:59:19 +00:00
|
|
|
#define __PPC_PRFX_R(r) (((r) & 0x1) << 20)
|
2011-04-14 22:31:56 +00:00
|
|
|
|
2019-05-03 06:40:15 +00:00
|
|
|
/*
|
|
|
|
* Both low and high 16 bits are added as SIGNED additions, so if low 16 bits
|
|
|
|
* has high bit set, high 16 bits must be adjusted. These macros do that (stolen
|
|
|
|
* from binutils).
|
|
|
|
*/
|
|
|
|
#define PPC_LO(v) ((v) & 0xffff)
|
|
|
|
#define PPC_HI(v) (((v) >> 16) & 0xffff)
|
|
|
|
#define PPC_HA(v) PPC_HI((v) + 0x8000)
|
2021-05-20 13:50:49 +00:00
|
|
|
#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
|
|
|
|
#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
|
2019-05-03 06:40:15 +00:00
|
|
|
|
2022-05-09 05:36:14 +00:00
|
|
|
/* LI Field */
|
|
|
|
#define PPC_LI_MASK 0x03fffffc
|
|
|
|
#define PPC_LI(v) ((v) & PPC_LI_MASK)
|
|
|
|
|
2010-02-10 00:57:28 +00:00
|
|
|
/*
|
2010-03-11 05:33:25 +00:00
|
|
|
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
|
|
|
|
* larx with EH set as an illegal instruction.
|
2010-02-10 00:57:28 +00:00
|
|
|
*/
|
|
|
|
#ifdef CONFIG_PPC64
|
|
|
|
#define __PPC_EH(eh) (((eh) & 0x1) << 0)
|
|
|
|
#else
|
|
|
|
#define __PPC_EH(eh) 0
|
|
|
|
#endif
|
2009-02-10 20:10:44 +00:00
|
|
|
|
2020-06-24 11:30:33 +00:00
|
|
|
/* Base instruction encoding */
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_CP_ABORT (0x7c00068c)
|
2020-06-24 11:30:33 +00:00
|
|
|
#define PPC_RAW_COPY(a, b) (PPC_INST_COPY | ___PPC_RA(a) | ___PPC_RB(b))
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_DARN(t, l) (0x7c0005e6 | ___PPC_RT(t) | (((l) & 0x3) << 16))
|
|
|
|
#define PPC_RAW_DCBAL(a, b) (0x7c2005ec | __PPC_RA(a) | __PPC_RB(b))
|
|
|
|
#define PPC_RAW_DCBZL(a, b) (0x7c2007ec | __PPC_RA(a) | __PPC_RB(b))
|
|
|
|
#define PPC_RAW_LQARX(t, a, b, eh) (0x7c000228 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh))
|
|
|
|
#define PPC_RAW_LDARX(t, a, b, eh) (0x7c0000a8 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh))
|
|
|
|
#define PPC_RAW_LWARX(t, a, b, eh) (0x7c000028 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh))
|
|
|
|
#define PPC_RAW_PHWSYNC (0x7c8004ac)
|
|
|
|
#define PPC_RAW_PLWSYNC (0x7ca004ac)
|
|
|
|
#define PPC_RAW_STQCX(t, a, b) (0x7c00016d | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_MADDHD(t, a, b, c) (0x10000030 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c))
|
|
|
|
#define PPC_RAW_MADDHDU(t, a, b, c) (0x10000031 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c))
|
|
|
|
#define PPC_RAW_MADDLD(t, a, b, c) (0x10000033 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c))
|
|
|
|
#define PPC_RAW_MSGSND(b) (0x7c00019c | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_MSGSYNC (0x7c0006ec)
|
|
|
|
#define PPC_RAW_MSGCLR(b) (0x7c0001dc | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_MSGSNDP(b) (0x7c00011c | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_MSGCLRP(b) (0x7c00015c | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_PASTE(a, b) (0x7c20070d | ___PPC_RA(a) | ___PPC_RB(b))
|
2020-06-24 11:30:33 +00:00
|
|
|
#define PPC_RAW_POPCNTB(a, s) (PPC_INST_POPCNTB | __PPC_RA(a) | __PPC_RS(s))
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_POPCNTD(a, s) (0x7c0003f4 | __PPC_RA(a) | __PPC_RS(s))
|
|
|
|
#define PPC_RAW_POPCNTW(a, s) (0x7c0002f4 | __PPC_RA(a) | __PPC_RS(s))
|
|
|
|
#define PPC_RAW_RFCI (0x4c000066)
|
|
|
|
#define PPC_RAW_RFDI (0x4c00004e)
|
|
|
|
#define PPC_RAW_RFMCI (0x4c00004c)
|
|
|
|
#define PPC_RAW_TLBILX(t, a, b) (0x7c000024 | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
|
|
|
|
#define PPC_RAW_WAIT(w) (0x7c00007c | __PPC_WC(w))
|
|
|
|
#define PPC_RAW_TLBIE(lp, a) (0x7c000264 | ___PPC_RB(a) | ___PPC_RS(lp))
|
2020-06-24 11:30:33 +00:00
|
|
|
#define PPC_RAW_TLBIE_5(rb, rs, ric, prs, r) \
|
2020-06-24 11:30:38 +00:00
|
|
|
(0x7c000264 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r))
|
2020-06-24 11:30:33 +00:00
|
|
|
#define PPC_RAW_TLBIEL(rb, rs, ric, prs, r) \
|
2020-06-24 11:30:38 +00:00
|
|
|
(0x7c000224 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r))
|
2021-12-21 05:59:04 +00:00
|
|
|
/* tlbiel, older (v2.05-style, per the _v205 suffix) two-operand form: RB
 * plus an L field at bit 21. (l) is parenthesised so that compound
 * arguments such as `x & 1` expand correctly. */
#define PPC_RAW_TLBIEL_v205(rb, l) (0x7c000224 | ___PPC_RB(rb) | ((l) << 21))
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_TLBSRX_DOT(a, b) (0x7c0006a5 | __PPC_RA0(a) | __PPC_RB(b))
|
|
|
|
#define PPC_RAW_TLBIVAX(a, b) (0x7c000624 | __PPC_RA0(a) | __PPC_RB(b))
|
|
|
|
#define PPC_RAW_ERATWE(s, a, w) (0x7c0001a6 | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
|
|
|
|
#define PPC_RAW_ERATRE(s, a, w) (0x7c000166 | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
|
|
|
|
#define PPC_RAW_ERATILX(t, a, b) (0x7c000066 | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
|
|
|
|
#define PPC_RAW_ERATIVAX(s, a, b) (0x7c000666 | __PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b))
|
|
|
|
/* eratsx: third operand is the RB register. The parameter was previously
 * named `w` while the body referenced `b`, so the macro could never expand
 * correctly; renamed to `b` (positional callers are unaffected, and this
 * matches the neighbouring PPC_RAW_ERATIVAX(s, a, b) convention). */
#define PPC_RAW_ERATSX(t, a, b) (0x7c000126 | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
|
|
|
|
/* eratsx. (Rc=1 variant of eratsx, opcode bit 0 set): same fix as
 * PPC_RAW_ERATSX — the body referenced `b` but the parameter was named
 * `w`; the third parameter is now `b` (positional callers unaffected). */
#define PPC_RAW_ERATSX_DOT(t, a, b) (0x7c000127 | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
|
|
|
|
#define PPC_RAW_SLBFEE_DOT(t, b) (0x7c0007a7 | __PPC_RT(t) | __PPC_RB(b))
|
|
|
|
#define __PPC_RAW_SLBFEE_DOT(t, b) (0x7c0007a7 | ___PPC_RT(t) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_ICBT(c, a, b) (0x7c00002c | __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
|
|
|
|
#define PPC_RAW_LBZCIX(t, a, b) (0x7c0006aa | __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b))
|
|
|
|
#define PPC_RAW_STBCIX(s, a, b) (0x7c0007aa | __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
|
|
|
|
#define PPC_RAW_DCBFPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (4 << 21))
|
|
|
|
#define PPC_RAW_DCBSTPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (6 << 21))
|
2021-05-20 10:23:03 +00:00
|
|
|
#define PPC_RAW_SC() (0x44000002)
|
2021-05-20 10:23:09 +00:00
|
|
|
#define PPC_RAW_SYNC() (0x7c0004ac)
|
|
|
|
#define PPC_RAW_ISYNC() (0x4c00012c)
|
2021-05-20 10:23:03 +00:00
|
|
|
|
2020-06-24 11:30:33 +00:00
|
|
|
/*
|
|
|
|
* Define what the VSX XX1 form instructions will look like, then add
|
|
|
|
* the 128 bit load store instructions based on that.
|
|
|
|
*/
|
|
|
|
#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b))
|
|
|
|
#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b))
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_STXVD2X(s, a, b) (0x7c000798 | VSX_XX1((s), a, b))
|
|
|
|
#define PPC_RAW_LXVD2X(s, a, b) (0x7c000698 | VSX_XX1((s), a, b))
|
|
|
|
#define PPC_RAW_MFVRD(a, t) (0x7c000066 | VSX_XX1((t) + 32, a, R0))
|
|
|
|
#define PPC_RAW_MTVRD(t, a) (0x7c000166 | VSX_XX1((t) + 32, a, R0))
|
|
|
|
#define PPC_RAW_VPMSUMW(t, a, b) (0x10000488 | VSX_XX3((t), a, b))
|
|
|
|
#define PPC_RAW_VPMSUMD(t, a, b) (0x100004c8 | VSX_XX3((t), a, b))
|
|
|
|
#define PPC_RAW_XXLOR(t, a, b) (0xf0000490 | VSX_XX3((t), a, b))
|
|
|
|
#define PPC_RAW_XXSWAPD(t, a) (0xf0000250 | VSX_XX3((t), a, a))
|
|
|
|
#define PPC_RAW_XVCPSGNDP(t, a, b) ((0xf0000780 | VSX_XX3((t), (a), (b))))
|
2020-06-24 11:30:33 +00:00
|
|
|
#define PPC_RAW_VPERMXOR(vrt, vra, vrb, vrc) \
|
2020-06-24 11:30:38 +00:00
|
|
|
((0x1000002d | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | (((vrc) & 0x1f) << 6)))
|
2020-10-11 05:09:07 +00:00
|
|
|
#define PPC_RAW_LXVP(xtp, a, i) (0x18000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_DQ(i))
|
|
|
|
#define PPC_RAW_STXVP(xsp, a, i) (0x18000001 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_DQ(i))
|
|
|
|
#define PPC_RAW_LXVPX(xtp, a, b) (0x7c00029a | __PPC_XTP(xtp) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_STXVPX(xsp, a, b) (0x7c00039a | __PPC_XSP(xsp) | ___PPC_RA(a) | ___PPC_RB(b))
|
2021-05-20 10:23:00 +00:00
|
|
|
#define PPC_RAW_PLXVP_P(xtp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
|
|
|
|
#define PPC_RAW_PLXVP_S(xtp, i, a, pr) (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i))
|
|
|
|
#define PPC_RAW_PSTXVP_P(xsp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
|
|
|
|
#define PPC_RAW_PSTXVP_S(xsp, i, a, pr) (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i))
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_NAP (0x4c000364)
|
|
|
|
#define PPC_RAW_SLEEP (0x4c0003a4)
|
|
|
|
#define PPC_RAW_WINKLE (0x4c0003e4)
|
|
|
|
#define PPC_RAW_STOP (0x4c0002e4)
|
|
|
|
#define PPC_RAW_CLRBHRB (0x7c00035c)
|
|
|
|
#define PPC_RAW_MFBHRBE(r, n) (0x7c00025c | __PPC_RT(r) | (((n) & 0x3ff) << 11))
|
2020-06-24 11:30:33 +00:00
|
|
|
#define PPC_RAW_TRECHKPT (PPC_INST_TRECHKPT)
|
|
|
|
#define PPC_RAW_TRECLAIM(r) (PPC_INST_TRECLAIM | __PPC_RA(r))
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_TABORT(r) (0x7c00071d | __PPC_RA(r))
|
2020-06-24 11:30:33 +00:00
|
|
|
#define TMRN(x) ((((x) & 0x1f) << 16) | (((x) & 0x3e0) << 6))
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_MTTMR(tmr, r) (0x7c0003dc | TMRN(tmr) | ___PPC_RS(r))
|
|
|
|
#define PPC_RAW_MFTMR(tmr, r) (0x7c0002dc | TMRN(tmr) | ___PPC_RT(r))
|
|
|
|
#define PPC_RAW_ICSWX(s, a, b) (0x7c00032d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_ICSWEPX(s, a, b) (0x7c00076d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_SLBIA(IH) (0x7c0003e4 | (((IH) & 0x7) << 21))
|
2020-06-24 11:30:33 +00:00
|
|
|
#define PPC_RAW_VCMPEQUD_RC(vrt, vra, vrb) \
|
2020-06-24 11:30:38 +00:00
|
|
|
(0x100000c7 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21)
|
2020-06-24 11:30:33 +00:00
|
|
|
#define PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb) \
|
2020-06-24 11:30:38 +00:00
|
|
|
(0x10000006 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21)
|
2022-05-09 05:36:23 +00:00
|
|
|
#define PPC_RAW_LD(r, base, i) (0xe8000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_DS(i))
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_LWZ(r, base, i) (0x80000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
|
|
|
|
#define PPC_RAW_LWZX(t, a, b) (0x7c00002e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
|
2022-05-09 05:36:23 +00:00
|
|
|
#define PPC_RAW_STD(r, base, i) (0xf8000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_DS(i))
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_STDCX(s, a, b) (0x7c0001ad | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_LFSX(t, a, b) (0x7c00042e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_STFSX(s, a, b) (0x7c00052e | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_LFDX(t, a, b) (0x7c0004ae | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_STFDX(s, a, b) (0x7c0005ae | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_LVX(t, a, b) (0x7c0000ce | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_STVX(s, a, b) (0x7c0001ce | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
|
2021-03-22 16:37:51 +00:00
|
|
|
#define PPC_RAW_ADDE(t, a, b) (0x7c000114 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_ADDZE(t, a) (0x7c000194 | ___PPC_RT(t) | ___PPC_RA(a))
|
|
|
|
#define PPC_RAW_ADDME(t, a) (0x7c0001d4 | ___PPC_RT(t) | ___PPC_RA(a))
|
2022-05-09 05:36:23 +00:00
|
|
|
#define PPC_RAW_ADD(t, a, b) (0x7c000214 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_ADD_DOT(t, a, b) (0x7c000214 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_ADDC(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_ADDC_DOT(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
|
2021-05-20 10:23:11 +00:00
|
|
|
#define PPC_RAW_NOP() PPC_RAW_ORI(0, 0, 0)
|
2021-05-20 10:23:05 +00:00
|
|
|
#define PPC_RAW_BLR() (0x4e800020)
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_BLRL() (0x4e800021)
|
|
|
|
#define PPC_RAW_MTLR(r) (0x7c0803a6 | ___PPC_RT(r))
|
2021-05-20 10:23:07 +00:00
|
|
|
#define PPC_RAW_MFLR(t) (0x7c0802a6 | ___PPC_RT(t))
|
2021-05-20 10:23:04 +00:00
|
|
|
#define PPC_RAW_BCTR() (0x4e800420)
|
2021-05-20 10:23:03 +00:00
|
|
|
#define PPC_RAW_BCTRL() (0x4e800421)
|
2021-05-20 10:23:04 +00:00
|
|
|
#define PPC_RAW_MTCTR(r) (0x7c0903a6 | ___PPC_RT(r))
|
|
|
|
#define PPC_RAW_ADDI(d, a, i) (0x38000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
|
2020-06-24 11:30:36 +00:00
|
|
|
#define PPC_RAW_LI(r, i) PPC_RAW_ADDI(r, 0, i)
|
2021-05-20 10:23:04 +00:00
|
|
|
#define PPC_RAW_ADDIS(d, a, i) (0x3c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
|
2021-03-22 16:37:51 +00:00
|
|
|
#define PPC_RAW_ADDIC(d, a, i) (0x30000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
|
|
|
|
#define PPC_RAW_ADDIC_DOT(d, a, i) (0x34000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
|
2020-06-24 11:30:36 +00:00
|
|
|
#define PPC_RAW_LIS(r, i) PPC_RAW_ADDIS(r, 0, i)
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_STDX(r, base, b) (0x7c00012a | ___PPC_RS(r) | ___PPC_RA(base) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_STDU(r, base, i) (0xf8000001 | ___PPC_RS(r) | ___PPC_RA(base) | ((i) & 0xfffc))
|
|
|
|
#define PPC_RAW_STW(r, base, i) (0x90000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
|
|
|
|
#define PPC_RAW_STWU(r, base, i) (0x94000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
|
|
|
|
#define PPC_RAW_STH(r, base, i) (0xb0000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
|
|
|
|
#define PPC_RAW_STB(r, base, i) (0x98000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
|
|
|
|
#define PPC_RAW_LBZ(r, base, i) (0x88000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
|
|
|
|
#define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
|
|
|
|
#define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
|
2022-01-06 11:45:12 +00:00
|
|
|
#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_LDBRX(r, base, b) (0x7c000428 | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_STWCX(s, a, b) (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_CMPWI(a, i) (0x2c000000 | ___PPC_RA(a) | IMM_L(i))
|
|
|
|
#define PPC_RAW_CMPDI(a, i) (0x2c200000 | ___PPC_RA(a) | IMM_L(i))
|
|
|
|
#define PPC_RAW_CMPW(a, b) (0x7c000000 | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_CMPD(a, b) (0x7c200000 | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_CMPLWI(a, i) (0x28000000 | ___PPC_RA(a) | IMM_L(i))
|
|
|
|
#define PPC_RAW_CMPLDI(a, i) (0x28200000 | ___PPC_RA(a) | IMM_L(i))
|
|
|
|
#define PPC_RAW_CMPLW(a, b) (0x7c000040 | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_CMPLD(a, b) (0x7c200040 | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_SUB(d, a, b) (0x7c000050 | ___PPC_RT(d) | ___PPC_RB(a) | ___PPC_RA(b))
|
2021-03-22 16:37:51 +00:00
|
|
|
#define PPC_RAW_SUBFC(d, a, b) (0x7c000010 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_SUBFE(d, a, b) (0x7c000110 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_SUBFIC(d, a, i) (0x20000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
|
|
|
|
#define PPC_RAW_SUBFZE(d, a) (0x7c000190 | ___PPC_RT(d) | ___PPC_RA(a))
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_MULD(d, a, b) (0x7c0001d2 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_MULW(d, a, b) (0x7c0001d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_MULHWU(d, a, b) (0x7c000016 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_MULI(d, a, i) (0x1c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
|
|
|
|
#define PPC_RAW_DIVWU(d, a, b) (0x7c000396 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_DIVDU(d, a, b) (0x7c000392 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
|
2020-07-28 13:03:06 +00:00
|
|
|
#define PPC_RAW_DIVDE(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
/* divde. — record form of divde: base encoding with the Rc bit set */
#define PPC_RAW_DIVDE_DOT(t, a, b) (PPC_RAW_DIVDE(t, a, b) | 0x1)
|
|
|
|
#define PPC_RAW_DIVDEU(t, a, b) (0x7c000312 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
|
|
|
|
/* divdeu. — record form of divdeu: base encoding with the Rc bit set */
#define PPC_RAW_DIVDEU_DOT(t, a, b) (PPC_RAW_DIVDEU(t, a, b) | 0x1)
|
2020-06-24 11:30:38 +00:00
|
|
|
/*
 * Fixed-point logical ops put the destination in the RA field and the
 * first source in RS — note ___PPC_RA(d) / ___PPC_RS(a) below.
 */
#define PPC_RAW_AND(d, a, b) (0x7c000038 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_ANDI(d, a, i) (0x70000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
|
2021-03-22 16:37:51 +00:00
|
|
|
#define PPC_RAW_ANDIS(d, a, i) (0x74000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_AND_DOT(d, a, b) (0x7c000039 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_OR(d, a, b) (0x7c000378 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
|
2020-06-24 11:30:36 +00:00
|
|
|
#define PPC_RAW_MR(d, a) PPC_RAW_OR(d, a, a)
|
2021-05-20 13:50:49 +00:00
|
|
|
#define PPC_RAW_ORI(d, a, i) (0x60000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
|
|
|
|
#define PPC_RAW_ORIS(d, a, i) (0x64000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
|
2021-03-22 16:37:51 +00:00
|
|
|
#define PPC_RAW_NOR(d, a, b) (0x7c0000f8 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
|
|
|
|
#define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
|
|
|
|
#define PPC_RAW_XORIS(d, a, i) (0x6c000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
|
|
|
|
#define PPC_RAW_EXTSW(d, a) (0x7c0007b4 | ___PPC_RA(d) | ___PPC_RS(a))
|
|
|
|
#define PPC_RAW_SLW(d, a, s) (0x7c000030 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
|
|
|
|
#define PPC_RAW_SLD(d, a, s) (0x7c000036 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
|
|
|
|
#define PPC_RAW_SRW(d, a, s) (0x7c000430 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
|
|
|
|
#define PPC_RAW_SRAW(d, a, s) (0x7c000630 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
|
|
|
|
#define PPC_RAW_SRAWI(d, a, i) (0x7c000670 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i))
|
|
|
|
#define PPC_RAW_SRD(d, a, s) (0x7c000436 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
|
|
|
|
#define PPC_RAW_SRAD(d, a, s) (0x7c000634 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
|
|
|
|
#define PPC_RAW_SRADI(d, a, i) (0x7c000674 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i))
|
|
|
|
#define PPC_RAW_RLWINM(d, a, i, mb, me) (0x54000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
|
2020-06-24 11:30:36 +00:00
|
|
|
/*
 * rlwinm. — record form of rlwinm (Rc=1). The definition was split across
 * a line continuation that had been severed by stray non-source lines;
 * rejoined here so the macro is well-formed.
 */
#define PPC_RAW_RLWINM_DOT(d, a, i, mb, me) \
	(0x54000001 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
|
|
|
|
#define PPC_RAW_RLWIMI(d, a, i, mb, me) (0x50000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
|
|
|
|
#define PPC_RAW_RLDICL(d, a, i, mb) (0x78000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_MB64(mb))
|
2021-05-20 13:50:49 +00:00
|
|
|
#define PPC_RAW_RLDICR(d, a, i, me) (0x78000004 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
|
2020-06-24 11:30:36 +00:00
|
|
|
|
|
|
|
/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
|
|
|
|
#define PPC_RAW_SLWI(d, a, i) PPC_RAW_RLWINM(d, a, i, 0, 31-(i))
|
|
|
|
/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */
|
|
|
|
#define PPC_RAW_SRWI(d, a, i) PPC_RAW_RLWINM(d, a, 32-(i), i, 31)
|
|
|
|
/* sldi = rldicr Rx, Ry, n, 63-n */
|
|
|
|
#define PPC_RAW_SLDI(d, a, i) PPC_RAW_RLDICR(d, a, i, 63-(i))
|
|
|
|
/* srdi = rldicl Rx, Ry, 64-n, n */
|
|
|
|
#define PPC_RAW_SRDI(d, a, i) PPC_RAW_RLDICL(d, a, 64-(i), i)
|
|
|
|
|
2020-06-24 11:30:38 +00:00
|
|
|
#define PPC_RAW_NEG(d, a) (0x7c0000d0 | ___PPC_RT(d) | ___PPC_RA(a))
|
2020-06-24 11:30:33 +00:00
|
|
|
|
2020-11-24 15:24:59 +00:00
|
|
|
#define PPC_RAW_MFSPR(d, spr) (0x7c0002a6 | ___PPC_RT(d) | __PPC_SPR(spr))
|
2021-05-20 10:23:09 +00:00
|
|
|
#define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr))
|
|
|
|
#define PPC_RAW_EIEIO() (0x7c0006ac)
|
2020-11-24 15:24:59 +00:00
|
|
|
|
2022-05-09 05:36:20 +00:00
|
|
|
#define PPC_RAW_BRANCH(offset) (0x48000000 | PPC_LI(offset))
|
2022-05-09 05:36:14 +00:00
|
|
|
#define PPC_RAW_BL(offset) (0x48000001 | PPC_LI(offset))
|
2021-10-12 12:30:52 +00:00
|
|
|
|
2009-02-10 20:10:44 +00:00
|
|
|
/* Deal with instructions that older assemblers aren't aware of */
|
2020-06-09 07:06:08 +00:00
|
|
|
#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
|
2020-06-24 11:30:37 +00:00
|
|
|
#define PPC_CP_ABORT stringify_in_c(.long PPC_RAW_CP_ABORT)
|
|
|
|
#define PPC_COPY(a, b) stringify_in_c(.long PPC_RAW_COPY(a, b))
|
|
|
|
#define PPC_DARN(t, l) stringify_in_c(.long PPC_RAW_DARN(t, l))
|
|
|
|
#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_RAW_DCBAL(a, b))
|
|
|
|
#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_RAW_DCBZL(a, b))
|
2020-07-28 13:03:06 +00:00
|
|
|
#define PPC_DIVDE(t, a, b) stringify_in_c(.long PPC_RAW_DIVDE(t, a, b))
|
|
|
|
#define PPC_DIVDEU(t, a, b) stringify_in_c(.long PPC_RAW_DIVDEU(t, a, b))
|
2021-12-21 05:59:03 +00:00
|
|
|
#define PPC_DSSALL stringify_in_c(.long PPC_INST_DSSALL)
|
2020-06-24 11:30:37 +00:00
|
|
|
#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LQARX(t, a, b, eh))
|
|
|
|
#define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_RAW_STQCX(t, a, b))
|
|
|
|
#define PPC_MADDHD(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDHD(t, a, b, c))
|
|
|
|
#define PPC_MADDHDU(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDHDU(t, a, b, c))
|
|
|
|
#define PPC_MADDLD(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDLD(t, a, b, c))
|
|
|
|
#define PPC_MSGSND(b) stringify_in_c(.long PPC_RAW_MSGSND(b))
|
|
|
|
#define PPC_MSGSYNC stringify_in_c(.long PPC_RAW_MSGSYNC)
|
|
|
|
#define PPC_MSGCLR(b) stringify_in_c(.long PPC_RAW_MSGCLR(b))
|
|
|
|
#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_RAW_MSGSNDP(b))
|
|
|
|
#define PPC_MSGCLRP(b) stringify_in_c(.long PPC_RAW_MSGCLRP(b))
|
|
|
|
#define PPC_PASTE(a, b) stringify_in_c(.long PPC_RAW_PASTE(a, b))
|
|
|
|
#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_RAW_POPCNTB(a, s))
|
|
|
|
#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_RAW_POPCNTD(a, s))
|
|
|
|
#define PPC_POPCNTW(a, s) stringify_in_c(.long PPC_RAW_POPCNTW(a, s))
|
|
|
|
#define PPC_RFCI stringify_in_c(.long PPC_RAW_RFCI)
|
|
|
|
#define PPC_RFDI stringify_in_c(.long PPC_RAW_RFDI)
|
|
|
|
#define PPC_RFMCI stringify_in_c(.long PPC_RAW_RFMCI)
|
|
|
|
#define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_RAW_TLBILX(t, a, b))
|
2009-02-10 20:10:44 +00:00
|
|
|
#define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b)
|
|
|
|
#define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b)
|
|
|
|
#define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b)
|
2020-06-24 11:30:37 +00:00
|
|
|
#define PPC_WAIT(w) stringify_in_c(.long PPC_RAW_WAIT(w))
|
|
|
|
#define PPC_TLBIE(lp, a) stringify_in_c(.long PPC_RAW_TLBIE(lp, a))
|
|
|
|
/*
 * tlbie with the 5-operand ISA v3.0 form (RIC/PRS/R). The line
 * continuation had been severed by stray non-source lines; rejoined here.
 */
#define PPC_TLBIE_5(rb, rs, ric, prs, r) \
	stringify_in_c(.long PPC_RAW_TLBIE_5(rb, rs, ric, prs, r))
|
2016-07-13 09:35:20 +00:00
|
|
|
/*
 * tlbiel with the 5-operand ISA v3.0 form (RIC/PRS/R). Rejoined the
 * severed line continuation and spaced the parameter list to match the
 * sibling PPC_TLBIE_5 definition.
 */
#define PPC_TLBIEL(rb, rs, ric, prs, r) \
	stringify_in_c(.long PPC_RAW_TLBIEL(rb, rs, ric, prs, r))
|
2021-12-21 05:59:04 +00:00
|
|
|
#define PPC_TLBIEL_v205(rb, l) stringify_in_c(.long PPC_RAW_TLBIEL_v205(rb, l))
|
2020-06-24 11:30:37 +00:00
|
|
|
#define PPC_TLBSRX_DOT(a, b) stringify_in_c(.long PPC_RAW_TLBSRX_DOT(a, b))
|
|
|
|
#define PPC_TLBIVAX(a, b) stringify_in_c(.long PPC_RAW_TLBIVAX(a, b))
|
|
|
|
|
|
|
|
#define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_RAW_ERATWE(s, a, w))
|
|
|
|
/*
 * eratre — ERAT read (Book III-E embedded MMU).
 * Fix: forward the 's' (RS) argument to PPC_RAW_ERATRE. The previous
 * expansion passed 'a' twice, silently ignoring 's' — compare the
 * sibling PPC_ERATWE, which forwards (s, a, w).
 */
#define PPC_ERATRE(s, a, w) stringify_in_c(.long PPC_RAW_ERATRE(s, a, w))
|
|
|
|
#define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_RAW_ERATILX(t, a, b))
|
|
|
|
#define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_RAW_ERATIVAX(s, a, b))
|
|
|
|
#define PPC_ERATSX(t, a, w) stringify_in_c(.long PPC_RAW_ERATSX(t, a, w))
|
|
|
|
#define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_RAW_ERATSX_DOT(t, a, w))
|
|
|
|
#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_RAW_SLBFEE_DOT(t, b))
|
|
|
|
#define __PPC_SLBFEE_DOT(t, b) stringify_in_c(.long __PPC_RAW_SLBFEE_DOT(t, b))
|
|
|
|
#define PPC_ICBT(c, a, b) stringify_in_c(.long PPC_RAW_ICBT(c, a, b))
|
2012-06-25 13:33:13 +00:00
|
|
|
/* PASemi instructions */
|
2020-06-24 11:30:37 +00:00
|
|
|
#define LBZCIX(t, a, b) stringify_in_c(.long PPC_RAW_LBZCIX(t, a, b))
|
|
|
|
#define STBCIX(s, a, b) stringify_in_c(.long PPC_RAW_STBCIX(s, a, b))
|
|
|
|
#define PPC_DCBFPS(a, b) stringify_in_c(.long PPC_RAW_DCBFPS(a, b))
|
|
|
|
#define PPC_DCBSTPS(a, b) stringify_in_c(.long PPC_RAW_DCBSTPS(a, b))
|
|
|
|
#define PPC_PHWSYNC stringify_in_c(.long PPC_RAW_PHWSYNC)
|
|
|
|
#define PPC_PLWSYNC stringify_in_c(.long PPC_RAW_PLWSYNC)
|
|
|
|
#define STXVD2X(s, a, b) stringify_in_c(.long PPC_RAW_STXVD2X(s, a, b))
|
|
|
|
#define LXVD2X(s, a, b) stringify_in_c(.long PPC_RAW_LXVD2X(s, a, b))
|
|
|
|
#define MFVRD(a, t) stringify_in_c(.long PPC_RAW_MFVRD(a, t))
|
|
|
|
#define MTVRD(t, a) stringify_in_c(.long PPC_RAW_MTVRD(t, a))
|
|
|
|
#define VPMSUMW(t, a, b) stringify_in_c(.long PPC_RAW_VPMSUMW(t, a, b))
|
|
|
|
#define VPMSUMD(t, a, b) stringify_in_c(.long PPC_RAW_VPMSUMD(t, a, b))
|
|
|
|
#define XXLOR(t, a, b) stringify_in_c(.long PPC_RAW_XXLOR(t, a, b))
|
|
|
|
#define XXSWAPD(t, a) stringify_in_c(.long PPC_RAW_XXSWAPD(t, a))
|
|
|
|
#define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_RAW_XVCPSGNDP(t, a, b)))
|
2009-04-29 20:58:01 +00:00
|
|
|
|
2017-08-04 03:42:32 +00:00
|
|
|
/*
 * vpermxor — vector permute and XOR (raid6/crypto helpers). The line
 * continuation had been severed by stray non-source lines; rejoined here.
 */
#define VPERMXOR(vrt, vra, vrb, vrc) \
	stringify_in_c(.long (PPC_RAW_VPERMXOR(vrt, vra, vrb, vrc)))
|
2017-08-04 03:42:32 +00:00
|
|
|
|
2020-06-24 11:30:37 +00:00
|
|
|
#define PPC_NAP stringify_in_c(.long PPC_RAW_NAP)
|
|
|
|
#define PPC_SLEEP stringify_in_c(.long PPC_RAW_SLEEP)
|
|
|
|
#define PPC_WINKLE stringify_in_c(.long PPC_RAW_WINKLE)
|
2011-01-24 07:42:41 +00:00
|
|
|
|
2020-06-24 11:30:37 +00:00
|
|
|
#define PPC_STOP stringify_in_c(.long PPC_RAW_STOP)
|
2016-07-08 06:20:49 +00:00
|
|
|
|
2013-04-22 19:42:40 +00:00
|
|
|
/* BHRB instructions */
|
2020-06-24 11:30:37 +00:00
|
|
|
#define PPC_CLRBHRB stringify_in_c(.long PPC_RAW_CLRBHRB)
|
|
|
|
#define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_RAW_MFBHRBE(r, n))
|
2013-04-22 19:42:40 +00:00
|
|
|
|
2013-02-13 16:21:30 +00:00
|
|
|
/* Transactional memory instructions */
|
2020-06-24 11:30:37 +00:00
|
|
|
#define TRECHKPT stringify_in_c(.long PPC_RAW_TRECHKPT)
|
|
|
|
#define TRECLAIM(r) stringify_in_c(.long PPC_RAW_TRECLAIM(r))
|
|
|
|
#define TABORT(r) stringify_in_c(.long PPC_RAW_TABORT(r))
|
2013-02-13 16:21:30 +00:00
|
|
|
|
2011-12-08 07:20:27 +00:00
|
|
|
/* book3e thread control instructions */
|
2020-06-24 11:30:37 +00:00
|
|
|
#define MTTMR(tmr, r) stringify_in_c(.long PPC_RAW_MTTMR(tmr, r))
|
|
|
|
#define MFTMR(tmr, r) stringify_in_c(.long PPC_RAW_MFTMR(tmr, r))
|
2011-12-08 07:20:27 +00:00
|
|
|
|
2015-05-07 17:49:13 +00:00
|
|
|
/* Coprocessor instructions */
|
2020-06-24 11:30:37 +00:00
|
|
|
#define PPC_ICSWX(s, a, b) stringify_in_c(.long PPC_RAW_ICSWX(s, a, b))
|
|
|
|
#define PPC_ICSWEPX(s, a, b) stringify_in_c(.long PPC_RAW_ICSWEPX(s, a, b))
|
|
|
|
|
|
|
|
#define PPC_SLBIA(IH) stringify_in_c(.long PPC_RAW_SLBIA(IH))
|
2019-06-23 10:41:52 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* These may only be used on ISA v3.0 or later (aka. CPU_FTR_ARCH_300, radix
|
|
|
|
* implies CPU_FTR_ARCH_300). USER/GUEST invalidates may only be used by radix
|
|
|
|
* mode (on HPT these would also invalidate various SLBEs which may not be
|
|
|
|
* desired).
|
|
|
|
*/
|
2019-06-23 10:41:51 +00:00
|
|
|
#define PPC_ISA_3_0_INVALIDATE_ERAT PPC_SLBIA(7)
|
2019-06-23 10:41:52 +00:00
|
|
|
#define PPC_RADIX_INVALIDATE_ERAT_USER PPC_SLBIA(3)
|
|
|
|
#define PPC_RADIX_INVALIDATE_ERAT_GUEST PPC_SLBIA(6)
|
2015-05-07 17:49:13 +00:00
|
|
|
|
2020-06-24 11:30:37 +00:00
|
|
|
#define VCMPEQUD_RC(vrt, vra, vrb) stringify_in_c(.long PPC_RAW_VCMPEQUD_RC(vrt, vra, vrb))
|
2018-06-07 01:57:52 +00:00
|
|
|
|
2020-06-24 11:30:37 +00:00
|
|
|
#define VCMPEQUB_RC(vrt, vra, vrb) stringify_in_c(.long PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb))
|
2018-06-07 01:57:52 +00:00
|
|
|
|
2009-02-10 20:10:44 +00:00
|
|
|
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
|