// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/virt.h>
/* Maximum phys_shift supported for any VM on this host */
static u32 __ro_after_init kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */
#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_EL2	(PSR_MODE_EL2h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)
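
/*
 * Note: each reset value above selects the target exception level (EL1h,
 * EL2h, or AArch32 SVC mode) with the D, A, I and F exception masks set,
 * i.e. all interrupts and debug exceptions masked, matching the
 * architectural reset state of PSTATE.
 */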
unsigned int __ro_after_init kvm_sve_max_vl;
int __init kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl();

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > VL_ARCH_MAX))
			kvm_sve_max_vl = VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl())
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}
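
/*
 * Pre-configure SVE for a vcpu whose userspace requested the
 * KVM_ARM_VCPU_SVE feature at KVM_ARM_VCPU_INIT time: record the host
 * limit as the vcpu's maximum vector length and flag the feature.
 * Allocation of the sve_state backing store is deliberately deferred
 * to finalization (see kvm_vcpu_finalize_sve() below).
 */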
static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	if (!system_supports_sve())
		return -EINVAL;

	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu_set_flag(vcpu, GUEST_HAS_SVE);

	return 0;
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;
	size_t reg_sz;
	int ret;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls(). Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
		    vl > VL_ARCH_MAX))
		return -EIO;

	reg_sz = vcpu_sve_state_size(vcpu);
	buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;

	ret = kvm_share_hyp(buf, buf + reg_sz);
	if (ret) {
		kfree(buf);
		return ret;
	}

	vcpu->arch.sve_state = buf;
	vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
	return 0;
}
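
/*
 * Backend for the KVM_ARM_VCPU_FINALIZE vcpu ioctl. SVE is currently
 * the only feature that requires finalization; finalizing it twice is
 * rejected with -EPERM. A minimal userspace sketch (error handling
 * elided, fd name assumed):
 *
 *	int feature = KVM_ARM_VCPU_SVE;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
 */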
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	void *sve_state = vcpu->arch.sve_state;

	kvm_vcpu_unshare_task_fp(vcpu);
	kvm_unshare_hyp(vcpu, vcpu + 1);
	if (sve_state)
		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
	kfree(sve_state);
	kfree(vcpu->arch.ccsidr);
}
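
/*
 * Zero the SVE register backing store on vcpu reset. This is only
 * reached once the vcpu has been finalized and sve_state allocated.
 */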
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}
static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by the userspace together and the system
	 * supports these capabilities.
	 */
	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
	    !system_has_full_ptr_auth())
		return -EINVAL;

	vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
	return 0;
}
/**
 * kvm_set_vm_width() - set the register width for the guest
 * @vcpu: Pointer to the vcpu being configured
 *
 * Set both KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
 * in the VM flags based on the vcpu's requested register width, the HW
 * capabilities and other options (such as MTE).
 * When REG_WIDTH_CONFIGURED is already set, the vcpu settings must be
 * consistent with the value of the FLAG_EL1_32BIT bit in the flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	bool is32bit;

	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);

	lockdep_assert_held(&kvm->lock);

	if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
		/*
		 * The guest's register width is already configured.
		 * Make sure that the vcpu is consistent with it.
		 */
		if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
			return 0;

		return -EINVAL;
	}

	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
		return -EINVAL;

	/* MTE is incompatible with AArch32 */
	if (kvm_has_mte(kvm) && is32bit)
		return -EINVAL;

	/* NV is incompatible with AArch32 */
	if (vcpu_has_nv(vcpu) && is32bit)
		return -EINVAL;

	if (is32bit)
		set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);

	set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);

	return 0;
}
/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code. In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded. Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function. Otherwise we leave the state alone. In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_reset_state reset_state;
	int ret;
	bool loaded;
	u32 pstate;

	mutex_lock(&vcpu->kvm->lock);
	ret = kvm_set_vm_width(vcpu);
	if (!ret) {
		reset_state = vcpu->arch.reset_state;
		WRITE_ONCE(vcpu->arch.reset_state.reset, false);
	}
	mutex_unlock(&vcpu->kvm->lock);

	if (ret)
		return ret;

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	/* Disallow NV+SVE for the time being */
	if (vcpu_has_nv(vcpu) && vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
		ret = -EINVAL;
		goto out;
	}

	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
			ret = kvm_vcpu_enable_sve(vcpu);
			if (ret)
				goto out;
		}
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
		if (kvm_vcpu_enable_ptrauth(vcpu)) {
			ret = -EINVAL;
			goto out;
		}
	}

	switch (vcpu->arch.target) {
	default:
		if (vcpu_el1_is_32bit(vcpu)) {
			pstate = VCPU_RESET_PSTATE_SVC;
		} else if (vcpu_has_nv(vcpu)) {
			pstate = VCPU_RESET_PSTATE_EL2;
		} else {
			pstate = VCPU_RESET_PSTATE_EL1;
		}

		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (reset_state.reset) {
		unsigned long target_pc = reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, reset_state.r0);
	}

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}
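
/*
 * Report the host-wide IPA size limit established by
 * kvm_set_ipa_limit(). Userspace can query this via the
 * KVM_CAP_ARM_VM_IPA_SIZE capability, e.g. (sketch, fd name assumed):
 *
 *	int ipa_bits = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
 *			     KVM_CAP_ARM_VM_IPA_SIZE);
 *
 * and then size the VM accordingly by passing
 * KVM_VM_TYPE_ARM_IPA_SIZE(ipa_bits) to KVM_CREATE_VM.
 */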
u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}
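/*
 * Compute the host's IPA size limit from ID_AA64MMFR0_EL1.PARange,
 * capped at 48 bits for 4K/16K pages, and check that the configured
 * PAGE_SIZE is usable at Stage-2 as advertised by the TGran*_2 fields.
 */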
int __init kvm_set_ipa_limit(void)
{
	unsigned int parange;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	/*
	 * IPA size beyond 48 bits could not be supported
	 * on either 4K or 16K page size. Hence let's cap
	 * it to 48 bits, in case it's reported as larger
	 * on the system.
	 */
	if (PAGE_SIZE != SZ_64K)
		parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) {
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}