2019-05-27 09:55:01 +03:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
|
|
|
|
*/
|
2020-05-06 06:40:31 +03:00
|
|
|
#include <asm/inst.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
struct pt_regs;

/*
 * Single-stepping an mtmsr[d] that clears MSR_RI would leave the
 * resulting exception unrecoverable, and single-stepping is how we
 * proceed past a breakpoint — so breakpoints are refused on mtmsr[d]
 * instructions, and likewise on rfi[d].  The predicates below detect
 * those instructions; the mask is chosen so that each one matches
 * both the 32-bit (mtmsr/rfi) and 64-bit (mtmsrd/rfid) encodings.
 */
#define IS_MTMSRD(instr)	((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)
#define IS_RFID(instr)		((ppc_inst_val(instr) & 0xfc0007be) == 0x4c000024)
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2014-09-02 08:35:07 +04:00
|
|
|
/*
 * Classification of an analysed instruction.  The numeric order of the
 * LOAD..LOAD_VSX and STORE..STCX ranges is significant: the OP_IS_*
 * predicates below test membership by range comparison, so the
 * load and store enumerators must stay contiguous.
 */
enum instruction_type {
	COMPUTE,		/* arith/logical/CR op, etc. */
	LOAD,			/* load and store types need to be contiguous */
	LOAD_MULTI,
	LOAD_FP,
	LOAD_VMX,
	LOAD_VSX,
	STORE,
	STORE_MULTI,
	STORE_FP,
	STORE_VMX,
	STORE_VSX,
	LARX,
	STCX,
	BRANCH,
	MFSPR,
	MTSPR,
	CACHEOP,
	BARRIER,
	SYSCALL,
	SYSCALL_VECTORED_0,
	MFMSR,
	MTMSR,
	RFI,
	INTERRUPT,
	UNKNOWN
};

/* Low 5 bits of a type word hold the enum instruction_type value. */
#define INSTR_TYPE_MASK	0x1f

/* Range tests over the contiguous load/store enumerators above. */
#define OP_IS_LOAD(type)	((LOAD <= (type) && (type) <= LOAD_VSX) || (type) == LARX)
#define OP_IS_STORE(type)	((STORE <= (type) && (type) <= STORE_VSX) || (type) == STCX)
#define OP_IS_LOAD_STORE(type)	(LOAD <= (type) && (type) <= STCX)
|
|
|
|
|
2017-08-30 07:12:25 +03:00
|
|
|
/*
 * Modifier flags ORed into the type word above bit 4.  Which set
 * applies depends on the instruction_type in the low bits; the bit
 * values deliberately overlap between the three groups.
 */

/* Compute flags, ORed in with type */
#define SETREG		0x20
#define SETCC		0x40
#define SETXER		0x80

/* Branch flags, ORed in with type */
#define SETLK		0x20
#define BRTAKEN		0x40
#define DECCTR		0x80

/* Load/store flags, ORed in with type */
#define SIGNEXT		0x20
#define UPDATE		0x40	/* matches bit in opcode 31 instructions */
#define BYTEREV		0x80
#define FPCONV		0x100
|
2014-09-02 08:35:07 +04:00
|
|
|
|
2017-08-30 07:12:25 +03:00
|
|
|
/* Barrier type field, ORed in with type */
#define BARRIER_MASK	0xe0
#define BARRIER_SYNC	0x00
#define BARRIER_ISYNC	0x20
#define BARRIER_EIEIO	0x40
#define BARRIER_LWSYNC	0x60
#define BARRIER_PTESYNC	0x80

/* Cacheop values, ORed in with type */
#define CACHEOP_MASK	0x700
#define DCBST		0
#define DCBF		0x100
#define DCBTST		0x200
#define DCBT		0x300
#define ICBI		0x400
#define DCBZ		0x500
|
2014-09-02 08:35:07 +04:00
|
|
|
|
powerpc: Handle most loads and stores in instruction emulation code
This extends the instruction emulation infrastructure in sstep.c to
handle all the load and store instructions defined in the Power ISA
v3.0, except for the atomic memory operations, ldmx (which was never
implemented), lfdp/stfdp, and the vector element load/stores.
The instructions added are:
Integer loads and stores: lbarx, lharx, lqarx, stbcx., sthcx., stqcx.,
lq, stq.
VSX loads and stores: lxsiwzx, lxsiwax, stxsiwx, lxvx, lxvl, lxvll,
lxvdsx, lxvwsx, stxvx, stxvl, stxvll, lxsspx, lxsdx, stxsspx, stxsdx,
lxvw4x, lxsibzx, lxvh8x, lxsihzx, lxvb16x, stxvw4x, stxsibx, stxvh8x,
stxsihx, stxvb16x, lxsd, lxssp, lxv, stxsd, stxssp, stxv.
These instructions are handled both in the analyse_instr phase and in
the emulate_step phase.
The code for lxvd2ux and stxvd2ux has been taken out, as those
instructions were never implemented in any processor and have been
taken out of the architecture, and their opcodes have been reused for
other instructions in POWER9 (lxvb16x and stxvb16x).
The emulation for the VSX loads and stores uses helper functions
which don't access registers or memory directly, which can hopefully
be reused by KVM later.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2017-08-30 07:12:27 +03:00
|
|
|
/* VSX flags values */
#define VSX_FPCONV	1	/* do floating point SP/DP conversion */
#define VSX_SPLAT	2	/* store loaded value into all elements */
#define VSX_LDLEFT	4	/* load VSX register from left */
#define VSX_CHECK_VEC	8	/* check MSR_VEC not MSR_VSX for reg >= 32 */

/* Prefixed flag, ORed in with type */
#define PREFIXED	0x800
|
|
|
|
|
2014-09-02 08:35:07 +04:00
|
|
|
/* Size field in type word: operand byte count, held in bits 12 and up. */
#define SIZE(n)		((n) << 12)
#define GETSIZE(w)	((w) >> 12)

/* Extract the enum instruction_type from a type word. */
#define GETTYPE(t)	((t) & INSTR_TYPE_MASK)
/* Instruction length in bytes: 8 for a prefixed instruction, else 4. */
#define GETLENGTH(t)	(((t) & PREFIXED) ? 8 : 4)

/* Compose a type word from type, flags and operand size. */
#define MKOP(t, f, s)	((t) | (f) | SIZE(s))

/* Prefix instruction operands */
#define GET_PREFIX_RA(i)	(((i) >> 16) & 0x1f)
#define GET_PREFIX_R(i)		((i) & (1ul << 20))
|
|
|
|
|
2020-06-26 12:51:58 +03:00
|
|
|
/*
 * NOTE(review): code-patching site label; presumably patched with the
 * instruction to be executed by the emulation self-test harness —
 * confirm at the defining assembly/selftest code.
 */
extern s32 patch__exec_instr;
|
|
|
|
|
2014-09-02 08:35:07 +04:00
|
|
|
struct instruction_op {
|
|
|
|
int type;
|
|
|
|
int reg;
|
|
|
|
unsigned long val;
|
|
|
|
/* For LOAD/STORE/LARX/STCX */
|
|
|
|
unsigned long ea;
|
|
|
|
int update_reg;
|
|
|
|
/* For MFSPR */
|
|
|
|
int spr;
|
2017-08-30 07:12:25 +03:00
|
|
|
u32 ccval;
|
|
|
|
u32 xerval;
|
powerpc: Handle most loads and stores in instruction emulation code
This extends the instruction emulation infrastructure in sstep.c to
handle all the load and store instructions defined in the Power ISA
v3.0, except for the atomic memory operations, ldmx (which was never
implemented), lfdp/stfdp, and the vector element load/stores.
The instructions added are:
Integer loads and stores: lbarx, lharx, lqarx, stbcx., sthcx., stqcx.,
lq, stq.
VSX loads and stores: lxsiwzx, lxsiwax, stxsiwx, lxvx, lxvl, lxvll,
lxvdsx, lxvwsx, stxvx, stxvl, stxvll, lxsspx, lxsdx, stxsspx, stxsdx,
lxvw4x, lxsibzx, lxvh8x, lxsihzx, lxvb16x, stxvw4x, stxsibx, stxvh8x,
stxsihx, stxvb16x, lxsd, lxssp, lxv, stxsd, stxssp, stxv.
These instructions are handled both in the analyse_instr phase and in
the emulate_step phase.
The code for lxvd2ux and stxvd2ux has been taken out, as those
instructions were never implemented in any processor and have been
taken out of the architecture, and their opcodes have been reused for
other instructions in POWER9 (lxvb16x and stxvb16x).
The emulation for the VSX loads and stores uses helper functions
which don't access registers or memory directly, which can hopefully
be reused by KVM later.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2017-08-30 07:12:27 +03:00
|
|
|
u8 element_size; /* for VSX/VMX loads/stores */
|
|
|
|
u8 vsx_flags;
|
|
|
|
};
|
|
|
|
|
|
|
|
union vsx_reg {
|
|
|
|
u8 b[16];
|
|
|
|
u16 h[8];
|
|
|
|
u32 w[4];
|
|
|
|
unsigned long d[2];
|
|
|
|
float fp[4];
|
|
|
|
double dp[2];
|
powerpc: Emulate FP/vector/VSX loads/stores correctly when regs not live
At present, the analyse_instr/emulate_step code checks for the
relevant MSR_FP/VEC/VSX bit being set when a FP/VMX/VSX load
or store is decoded, but doesn't recheck the bit before reading or
writing the relevant FP/VMX/VSX register in emulate_step().
Since we don't have preemption disabled, it is possible that we get
preempted between checking the MSR bit and doing the register access.
If that happened, then the registers would have been saved to the
thread_struct for the current process. Accesses to the CPU registers
would then potentially read stale values, or write values that would
never be seen by the user process.
Another way that the registers can become non-live is if a page
fault occurs when accessing user memory, and the page fault code
calls a copy routine that wants to use the VMX or VSX registers.
To fix this, the code for all the FP/VMX/VSX loads gets restructured
so that it forms an image in a local variable of the desired register
contents, then disables preemption, checks the MSR bit and either
sets the CPU register or writes the value to the thread struct.
Similarly, the code for stores checks the MSR bit, copies either the
CPU register or the thread struct to a local variable, then reenables
preemption and then copies the register image to memory.
If the instruction being emulated is in the kernel, then we must not
use the register values in the thread_struct. In this case, if the
relevant MSR enable bit is not set, then emulate_step refuses to
emulate the instruction.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2017-08-30 07:12:33 +03:00
|
|
|
__vector128 v;
|
2014-09-02 08:35:07 +04:00
|
|
|
};
|
|
|
|
|
2017-08-30 07:12:25 +03:00
|
|
|
/*
|
|
|
|
* Decode an instruction, and return information about it in *op
|
|
|
|
* without changing *regs.
|
|
|
|
*
|
|
|
|
* Return value is 1 if the instruction can be emulated just by
|
|
|
|
* updating *regs with the information in *op, -1 if we need the
|
|
|
|
* GPRs but *regs doesn't contain the full register set, or 0
|
|
|
|
* otherwise.
|
|
|
|
*/
|
|
|
|
extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
|
2020-05-06 06:40:31 +03:00
|
|
|
struct ppc_inst instr);
|
2017-08-30 07:12:25 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Emulate an instruction that can be executed just by updating
|
|
|
|
* fields in *regs.
|
|
|
|
*/
|
|
|
|
void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Emulate instructions that cause a transfer of control,
|
|
|
|
* arithmetic/logical instructions, loads and stores,
|
|
|
|
* cache operations and barriers.
|
|
|
|
*
|
|
|
|
* Returns 1 if the instruction was emulated successfully,
|
|
|
|
* 0 if it could not be emulated, or -1 for an instruction that
|
|
|
|
* should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
|
|
|
|
*/
|
2020-05-06 06:40:31 +03:00
|
|
|
extern int emulate_step(struct pt_regs *regs, struct ppc_inst instr);
|
2017-08-30 07:12:25 +03:00
|
|
|
|
2017-08-30 07:12:39 +03:00
|
|
|
/*
|
|
|
|
* Emulate a load or store instruction by reading/writing the
|
|
|
|
* memory of the current process. FP/VMX/VSX registers are assumed
|
|
|
|
* to hold live values if the appropriate enable bit in regs->msr is
|
|
|
|
* set; otherwise this will use the saved values in the thread struct
|
|
|
|
* for user-mode accesses.
|
|
|
|
*/
|
|
|
|
extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);
|
|
|
|
|
powerpc: Handle most loads and stores in instruction emulation code
This extends the instruction emulation infrastructure in sstep.c to
handle all the load and store instructions defined in the Power ISA
v3.0, except for the atomic memory operations, ldmx (which was never
implemented), lfdp/stfdp, and the vector element load/stores.
The instructions added are:
Integer loads and stores: lbarx, lharx, lqarx, stbcx., sthcx., stqcx.,
lq, stq.
VSX loads and stores: lxsiwzx, lxsiwax, stxsiwx, lxvx, lxvl, lxvll,
lxvdsx, lxvwsx, stxvx, stxvl, stxvll, lxsspx, lxsdx, stxsspx, stxsdx,
lxvw4x, lxsibzx, lxvh8x, lxsihzx, lxvb16x, stxvw4x, stxsibx, stxvh8x,
stxsihx, stxvb16x, lxsd, lxssp, lxv, stxsd, stxssp, stxv.
These instructions are handled both in the analyse_instr phase and in
the emulate_step phase.
The code for lxvd2ux and stxvd2ux has been taken out, as those
instructions were never implemented in any processor and have been
taken out of the architecture, and their opcodes have been reused for
other instructions in POWER9 (lxvb16x and stxvb16x).
The emulation for the VSX loads and stores uses helper functions
which don't access registers or memory directly, which can hopefully
be reused by KVM later.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2017-08-30 07:12:27 +03:00
|
|
|
extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
|
powerpc: Handle opposite-endian processes in emulation code
This adds code to the load and store emulation code to byte-swap
the data appropriately when the process being emulated is set to
the opposite endianness to that of the kernel.
This also enables the emulation for the multiple-register loads
and stores (lmw, stmw, lswi, stswi, lswx, stswx) to work for
little-endian. In little-endian mode, the partial word at the
end of a transfer for lsw*/stsw* (when the byte count is not a
multiple of 4) is loaded/stored at the least-significant end of
the register. Additionally, this fixes a bug in the previous
code in that it could call read_mem/write_mem with a byte count
that was not 1, 2, 4 or 8.
Note that this only works correctly on processors with "true"
little-endian mode, such as IBM POWER processors from POWER6 on, not
the so-called "PowerPC" little-endian mode that uses address swizzling
as implemented on the old 32-bit 603, 604, 740/750, 74xx CPUs.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2017-08-30 07:12:38 +03:00
|
|
|
const void *mem, bool cross_endian);
|
|
|
|
extern void emulate_vsx_store(struct instruction_op *op,
|
|
|
|
const union vsx_reg *reg, void *mem,
|
|
|
|
bool cross_endian);
|
2017-08-30 07:12:36 +03:00
|
|
|
extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);
|