KVM: arm64: Add kimg_hyp_va() helper
KVM/arm64 is so far unable to deal with function pointers, as the compiler will generate the kernel's runtime VA, and not the linear mapping address, meaning that kern_hyp_va() will give the wrong result. We have so far been able to rely on PC-relative addressing, but that is not always easy to use, and it prevents the implementation of things like the mapping of an index to a pointer.

To allow this, provide a new helper that computes the required translation from the kernel image to the HYP VA space.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Parent: f8394f232b
Commit: 1db9d9ded7
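The "mapping of an index to a pointer" mentioned in the commit message is the function-pointer-table case the new macros are aimed at. Below is a minimal in-kernel sketch of that intended use, assuming a hypothetical dispatch table; the handler_t, handle_foo, dispatch and call_handler names are illustrative and not part of this patch. The compiler stores the kernel-image address of handle_foo in the table, and kimg_fn_hyp_va() turns it into an address that is callable at EL2:

	/* Hypothetical EL2 dispatch table; names are illustrative only. */
	typedef void (*handler_t)(struct kvm_cpu_context *);

	static void handle_foo(struct kvm_cpu_context *ctxt)
	{
		/* ... hypothetical handler body ... */
	}

	/*
	 * The compiler emits the kernel-image address of handle_foo here,
	 * which is neither a linear-map address nor a HYP VA.
	 */
	static const handler_t *dispatch[] = {
		kimg_fn_ptr(handle_foo),
	};

	static void call_handler(unsigned int id, struct kvm_cpu_context *ctxt)
	{
		/* Translate the kernel-image address to a HYP VA before calling. */
		handler_t fn = kimg_fn_hyp_va(dispatch[id]);

		fn(ctxt);
	}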
@@ -98,6 +98,24 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 #define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
+static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
+{
+	unsigned long offset;
+
+	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
+				    "movk %0, #0, lsl #16\n"
+				    "movk %0, #0, lsl #32\n"
+				    "movk %0, #0, lsl #48\n",
+				    kvm_update_kimg_phys_offset)
+		     : "=r" (offset));
+
+	return __kern_hyp_va((v - offset) | PAGE_OFFSET);
+}
+
+#define kimg_fn_hyp_va(v)	((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))
+
+#define kimg_fn_ptr(x)	(typeof(x) **)(x)
+
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
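The address arithmetic in __kimg_hyp_va() can be checked in isolation: kimage_voffset is the difference between a kernel-image VA and its physical address, so subtracting offset (kimage_voffset + PHYS_OFFSET, the value patched into the movz/movk placeholder at boot) leaves the offset into the linear map, and OR-ing in PAGE_OFFSET rebuilds the linear-map alias that __kern_hyp_va() already knows how to handle. A standalone sketch, assuming a made-up LP64 memory layout (the real values are only known at boot):

	#include <stdio.h>

	/* Hypothetical layout, for illustration only. */
	#define PAGE_OFFSET	0xffff000000000000UL	/* base of the linear map */
	#define PHYS_OFFSET	0x0000000040000000UL	/* start of physical RAM */
	#define KIMAGE_VOFFSET	0xffff7fffc8000000UL	/* kimage VA - PA */

	int main(void)
	{
		unsigned long v = 0xffff800010345678UL;	/* a kernel-image VA */
		unsigned long offset = KIMAGE_VOFFSET + PHYS_OFFSET;

		/*
		 * v - KIMAGE_VOFFSET is the physical address; subtracting
		 * PHYS_OFFSET as well leaves the offset into the linear map,
		 * and OR-ing in PAGE_OFFSET rebuilds the linear-map alias.
		 */
		unsigned long linear = (v - offset) | PAGE_OFFSET;

		printf("kimg VA:   0x%lx\n", v);
		printf("linear VA: 0x%lx\n", linear);	/* 0xffff000008345678 */
		return 0;
	}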
@@ -11,6 +11,7 @@
 #include <asm/debug-monitors.h>
 #include <asm/insn.h>
 #include <asm/kvm_mmu.h>
+#include <asm/memory.h>
 
 /*
  * The LSB of the HYP VA tag
@@ -201,3 +202,52 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 					   AARCH64_INSN_BRANCH_NOLINK);
 	*updptr++ = cpu_to_le32(insn);
 }
+
+static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	u32 insn, oinsn, rd;
+
+	BUG_ON(nr_inst != 4);
+
+	/* Compute target register */
+	oinsn = le32_to_cpu(*origptr);
+	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
+
+	/* movz rd, #(val & 0xffff) */
+	insn = aarch64_insn_gen_movewide(rd,
+					 (u16)val,
+					 0,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_ZERO);
+	*updptr++ = cpu_to_le32(insn);
+
+	/* movk rd, #((val >> 16) & 0xffff), lsl #16 */
+	insn = aarch64_insn_gen_movewide(rd,
+					 (u16)(val >> 16),
+					 16,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_KEEP);
+	*updptr++ = cpu_to_le32(insn);
+
+	/* movk rd, #((val >> 32) & 0xffff), lsl #32 */
+	insn = aarch64_insn_gen_movewide(rd,
+					 (u16)(val >> 32),
+					 32,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_KEEP);
+	*updptr++ = cpu_to_le32(insn);
+
+	/* movk rd, #((val >> 48) & 0xffff), lsl #48 */
+	insn = aarch64_insn_gen_movewide(rd,
+					 (u16)(val >> 48),
+					 48,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_KEEP);
+	*updptr++ = cpu_to_le32(insn);
+}
+
+void kvm_update_kimg_phys_offset(struct alt_instr *alt,
+				 __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst);
+}
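generate_mov_q() always emits exactly four instructions (hence the BUG_ON): a movz that zeroes the register and sets the low halfword, then three movks that keep what is already there and patch in the higher halfwords. This matches the four-instruction movz/movk placeholder that __kimg_hyp_va() reserves via ALTERNATIVE_CB. The effect on the target register can be modelled in plain C; the offset value below is made up for illustration:

	#include <stdio.h>

	/*
	 * Models what the patched movz/movk sequence leaves in the target
	 * register: the 64-bit value is rebuilt 16 bits at a time.
	 */
	static unsigned long mov_q(unsigned long val)
	{
		unsigned long r;

		r  = val & 0xffffUL;			/* movz rd, #v0          */
		r |= ((val >> 16) & 0xffffUL) << 16;	/* movk rd, #v1, lsl #16 */
		r |= ((val >> 32) & 0xffffUL) << 32;	/* movk rd, #v2, lsl #32 */
		r |= ((val >> 48) & 0xffffUL) << 48;	/* movk rd, #v3, lsl #48 */
		return r;
	}

	int main(void)
	{
		/* Hypothetical kimage_voffset + PHYS_OFFSET value. */
		unsigned long offset = 0xffff800008000000UL;

		printf("%d\n", mov_q(offset) == offset);	/* prints 1 */
		return 0;
	}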