arm64: fix endianness annotation for reloc_insn_movw() & reloc_insn_imm()
The functions reloc_insn_movw() & reloc_insn_imm() are used to read, modify and write back ARM instructions, which are always stored in memory in little-endian order. These values are correctly converted to/from native byte order, but the pointers used to hold their addresses are declared as if they pointed to native-order values. Fix this by declaring the pointers as __le32 * and removing the casts that are now unneeded. Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
This commit is contained in:
Parent
57c138357d
Commit
02129ae5fe
|
@@ -74,7 +74,7 @@ enum aarch64_reloc_op {
|
||||||
RELOC_OP_PAGE,
|
RELOC_OP_PAGE,
|
||||||
};
|
};
|
||||||
|
|
||||||
static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
|
static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
|
||||||
{
|
{
|
||||||
switch (reloc_op) {
|
switch (reloc_op) {
|
||||||
case RELOC_OP_ABS:
|
case RELOC_OP_ABS:
|
||||||
|
@@ -121,12 +121,12 @@ enum aarch64_insn_movw_imm_type {
|
||||||
AARCH64_INSN_IMM_MOVKZ,
|
AARCH64_INSN_IMM_MOVKZ,
|
||||||
};
|
};
|
||||||
|
|
||||||
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
|
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
|
||||||
int lsb, enum aarch64_insn_movw_imm_type imm_type)
|
int lsb, enum aarch64_insn_movw_imm_type imm_type)
|
||||||
{
|
{
|
||||||
u64 imm;
|
u64 imm;
|
||||||
s64 sval;
|
s64 sval;
|
||||||
u32 insn = le32_to_cpu(*(u32 *)place);
|
u32 insn = le32_to_cpu(*place);
|
||||||
|
|
||||||
sval = do_reloc(op, place, val);
|
sval = do_reloc(op, place, val);
|
||||||
imm = sval >> lsb;
|
imm = sval >> lsb;
|
||||||
|
@@ -154,7 +154,7 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
|
||||||
|
|
||||||
/* Update the instruction with the new encoding. */
|
/* Update the instruction with the new encoding. */
|
||||||
insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
|
insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
|
||||||
*(u32 *)place = cpu_to_le32(insn);
|
*place = cpu_to_le32(insn);
|
||||||
|
|
||||||
if (imm > U16_MAX)
|
if (imm > U16_MAX)
|
||||||
return -ERANGE;
|
return -ERANGE;
|
||||||
|
@@ -162,12 +162,12 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
|
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
|
||||||
int lsb, int len, enum aarch64_insn_imm_type imm_type)
|
int lsb, int len, enum aarch64_insn_imm_type imm_type)
|
||||||
{
|
{
|
||||||
u64 imm, imm_mask;
|
u64 imm, imm_mask;
|
||||||
s64 sval;
|
s64 sval;
|
||||||
u32 insn = le32_to_cpu(*(u32 *)place);
|
u32 insn = le32_to_cpu(*place);
|
||||||
|
|
||||||
/* Calculate the relocation value. */
|
/* Calculate the relocation value. */
|
||||||
sval = do_reloc(op, place, val);
|
sval = do_reloc(op, place, val);
|
||||||
|
@@ -179,7 +179,7 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
|
||||||
|
|
||||||
/* Update the instruction's immediate field. */
|
/* Update the instruction's immediate field. */
|
||||||
insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
|
insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
|
||||||
*(u32 *)place = cpu_to_le32(insn);
|
*place = cpu_to_le32(insn);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Extract the upper value bits (including the sign bit) and
|
* Extract the upper value bits (including the sign bit) and
|
||||||
|
|
Loading…
Reference in new issue