/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)

.text
.pushsection .hyp.text, "ax"

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
str x18, [\ctxt, #CPU_XREG_OFFSET(18)]
stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
stp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
// We require \ctxt is not x18-x28
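// (x18-x28 are reloaded before the later loads that still use \ctxt as the base)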
ldr x18, [\ctxt, #CPU_XREG_OFFSET(18)]
ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
ldp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
ldp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
ldp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *                   struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__guest_enter)
// x0: vcpu
// x1: host context
// x2-x17: clobbered by macros
// x29: guest context

// Store the host regs
save_callee_saved_regs x1

// Now the host state is stored; if we have a pending RAS SError, it must
// affect the host. If any asynchronous exception is pending we defer
// the guest entry. The DSB isn't necessary before v8.2 as any SError
// would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
dsb nshst
isb
alternative_else_nop_endif
mrs x1, isr_el1
cbz x1, 1f
mov x0, #ARM_EXCEPTION_IRQ
ret

1:
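// ISR_EL1 was clear: no asynchronous exception is pending, so enter the guest.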
add x29, x0, #VCPU_CONTEXT

// Macro ptrauth_switch_to_guest format:
// ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
// The below macro to restore guest keys is not implemented in C code
// as it may cause Pointer Authentication key signing mismatch errors
// when this feature is enabled for kernel code.
ptrauth_switch_to_guest x29, x0, x1, x2
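// Note: the switch above is semi-lazy; guest keys are only context-switched
// once the guest has trapped on a ptrauth instruction or key access.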
// Restore guest regs x0-x17
ldp x0, x1, [x29, #CPU_XREG_OFFSET(0)]
ldp x2, x3, [x29, #CPU_XREG_OFFSET(2)]
ldp x4, x5, [x29, #CPU_XREG_OFFSET(4)]
ldp x6, x7, [x29, #CPU_XREG_OFFSET(6)]
ldp x8, x9, [x29, #CPU_XREG_OFFSET(8)]
ldp x10, x11, [x29, #CPU_XREG_OFFSET(10)]
ldp x12, x13, [x29, #CPU_XREG_OFFSET(12)]
ldp x14, x15, [x29, #CPU_XREG_OFFSET(14)]
ldp x16, x17, [x29, #CPU_XREG_OFFSET(16)]

// Restore guest regs x18-x29, lr
restore_callee_saved_regs x29

// Do not touch any register after this!
eret
sb

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
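// Entered from the hyp exception vectors, which stash the guest's x0/x1
// on the stack before branching here.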
// x0: return code
// x1: vcpu
// x2-x29,lr: vcpu regs
// vcpu x0-x1 on the stack

add x1, x1, #VCPU_CONTEXT
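// x1 now points at the guest context (vcpu->arch.ctxt).
// The ALTERNATIVE below sets PSTATE.PAN on CPUs with the PAN feature,
// presumably so privileged accesses to user mappings stay disabled while
// the host-side exit path runs.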

ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

// Store the guest regs x2 and x3
stp x2, x3, [x1, #CPU_XREG_OFFSET(2)]

// Retrieve the guest regs x0-x1 from the stack
ldp x2, x3, [sp], #16 // x0, x1

// Store the guest regs x0-x1 and x4-x17
stp x2, x3, [x1, #CPU_XREG_OFFSET(0)]
stp x4, x5, [x1, #CPU_XREG_OFFSET(4)]
stp x6, x7, [x1, #CPU_XREG_OFFSET(6)]
stp x8, x9, [x1, #CPU_XREG_OFFSET(8)]
stp x10, x11, [x1, #CPU_XREG_OFFSET(10)]
stp x12, x13, [x1, #CPU_XREG_OFFSET(12)]
stp x14, x15, [x1, #CPU_XREG_OFFSET(14)]
stp x16, x17, [x1, #CPU_XREG_OFFSET(16)]

// Store the guest regs x18-x29, lr
save_callee_saved_regs x1

get_host_ctxt x2, x3

// Macro ptrauth_switch_to_host format:
// ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3)
// The below macro to save/restore keys is not implemented in C code
// as it may cause Pointer Authentication key signing mismatch errors
// when this feature is enabled for kernel code.
ptrauth_switch_to_host x1, x2, x3, x4, x5

// Now restore the host regs
restore_callee_saved_regs x2

alternative_if ARM64_HAS_RAS_EXTN
// If we have the RAS extensions we can consume a pending error
// without an unmask-SError and isb. The ESB-instruction consumed any
// pending guest error when we took the exception from the guest.
mrs_s x2, SYS_DISR_EL1
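// x2 now holds the deferred SError syndrome (zero if the ESB recorded nothing).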
str x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
cbz x2, 1f
msr_s SYS_DISR_EL1, xzr
orr x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1: ret
alternative_else
dsb sy // Synchronize against in-flight ld/st
isb // Prevent an early read of side-effect free ISR
mrs x2, isr_el1
tbnz x2, #8, 2f // ISR_EL1.A
ret
nop
2:
alternative_endif
// We know we have a pending asynchronous abort, now is the
// time to flush it out. From your VAXorcist book, page 666:
// "Threaten me not, oh Evil one! For I speak with
// the power of DEC, and I command thee to show thyself!"
mrs x2, elr_el2
mrs x3, esr_el2
mrs x4, spsr_el2
mov x5, x0

msr daifclr, #4 // Unmask aborts

// This is our single instruction exception window. A pending
// SError is guaranteed to occur at the earliest when we unmask
// it, and at the latest just after the ISB.
.global abort_guest_exit_start
abort_guest_exit_start:

isb

.global abort_guest_exit_end
abort_guest_exit_end:

msr daifset, #4 // Mask aborts

// If the exception took place, restore the EL1 exception
// context so that we can report some information.
// Merge the exception code with the SError pending bit.
tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
msr elr_el2, x2
msr esr_el2, x3
msr spsr_el2, x4
orr x0, x0, x5
1: ret
SYM_FUNC_END(__guest_enter)