Updates for KVM/ARM, take 3 supporting more than 4 CPUs.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)

iQEcBAABAgAGBQJSfD2NAAoJEEtpOizt6ddyK0IH+wQf6Hwe2nLAhj86knqDHsPt
LgJ6ZSY2bhWTKhCVDkH4HQt6ZqWEV7P8HsLNLc9FxjxCIgGO6Lp6Obv6sYscZrvh
OdzsZ/+j1t035qmeLwBJnB2x+j21ACd5LYVaWkfJPmGJ40KrcgP/t3fj3r1le91Z
jUM5BZY8fbEpJ1JaLoYZIEm1nYQJ4cabkqb9dFieqbVB1OoYw5W7KCV0wazOpg+f
T2WL8Dy+lP8DfGRrEjsIM299DlMAFfUCj7mgfO3yTDUK0Q6Q3ZN7f394c+04OfZv
/V1fM4y4X1i/2wlL3oPaf1WtYhwd2VxJwo1n3SLg1b9gWghYnqUR/vBHrUmzX/A=
=2ESp
-----END PGP SIGNATURE-----

Merge tag 'kvm-arm-for-3.13-3' of git://git.linaro.org/people/cdall/linux-kvm-arm into kvm-next

Updates for KVM/ARM, take 3 supporting more than 4 CPUs.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Conflicts:
	arch/arm/kvm/reset.c [cpu_reset->reset_regs change; context only]
Commit 6da8ae556c
arch/arm/include/asm/kvm_emulate.h

@@ -157,4 +157,9 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
 }
 
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cp15[c0_MPIDR];
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
arch/arm/kvm/coproc.c

@@ -74,11 +74,13 @@ int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
 	/*
-	 * Compute guest MPIDR. No need to mess around with different clusters
-	 * but we read the 'U' bit from the underlying hardware directly.
+	 * Compute guest MPIDR. We build a virtual cluster out of the
+	 * vcpu_id, but we read the 'U' bit from the underlying
+	 * hardware directly.
 	 */
-	vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & MPIDR_SMP_BITMASK)
-		| vcpu->vcpu_id;
+	vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
+				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
+				     (vcpu->vcpu_id & 3));
 }
 
 /* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
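The new layout groups each set of four vcpu_ids into one virtual cluster: the low two bits of the vcpu_id become Aff0 (the CPU within the cluster) and the remaining bits become Aff1 (the cluster number). A minimal user-space sketch of that arithmetic, assuming MPIDR_LEVEL_BITS is 8 (bits per affinity level); the function name and the #define here are illustrative only, not kernel code:

/* Standalone sketch, not kernel code: how the virtual cluster is built. */
#include <stdio.h>

#define MPIDR_LEVEL_BITS 8	/* assumed: 8 bits per affinity level */

static unsigned long guest_mpidr(unsigned int vcpu_id, unsigned long hw_smp_bits)
{
	/* Aff1 = cluster number (vcpu_id / 4), Aff0 = CPU within that cluster */
	return hw_smp_bits |
	       ((unsigned long)(vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
	       (vcpu_id & 3);
}

int main(void)
{
	/* vcpu_id 5 -> Aff1 = 1, Aff0 = 1, i.e. affinity 0x101 */
	printf("vcpu 5 -> MPIDR affinity 0x%lx\n", guest_mpidr(5, 0));
	return 0;
}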
@@ -122,6 +124,10 @@ static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
 	l2ctlr &= ~(3 << 24);
 	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
+	/* How many cores in the current cluster and the next ones */
+	ncores -= (vcpu->vcpu_id & ~3);
+	/* Cap it to the maximum number of cores in a single cluster */
+	ncores = min(ncores, 3U);
 	l2ctlr |= (ncores & 3) << 24;
 
 	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
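With vCPUs spread across virtual clusters, L2CTLR can no longer simply report online_vcpus - 1: each cluster should only advertise the cores it actually contains, and the two-bit field caps out at four cores. A user-space sketch of the same arithmetic (the function name is illustrative, and the field encodes "number of cores minus 1" as on Cortex-A15/A7):

/* Sketch only, not kernel code. */
static unsigned int l2ctlr_ncores_field(unsigned int online_vcpus,
					unsigned int vcpu_id)
{
	unsigned int ncores = online_vcpus - 1;

	/* Cores left in this cluster and the following ones */
	ncores -= (vcpu_id & ~3u);
	/* A single cluster reports at most 4 cores (field value 3) */
	if (ncores > 3)
		ncores = 3;
	return ncores & 3;
}

For example, with 6 vCPUs and vcpu_id 4 the field is 1, so cluster 1 reports the 2 cores it actually holds (vcpus 4 and 5), while cluster 0 still reports 4.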
arch/arm/kvm/psci.c

@@ -18,6 +18,7 @@
 #include <linux/kvm_host.h>
 #include <linux/wait.h>
 
+#include <asm/cputype.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_psci.h>
 
@@ -34,22 +35,30 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 {
 	struct kvm *kvm = source_vcpu->kvm;
-	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu *vcpu = NULL, *tmp;
 	wait_queue_head_t *wq;
 	unsigned long cpu_id;
+	unsigned long mpidr;
 	phys_addr_t target_pc;
+	int i;
 
 	cpu_id = *vcpu_reg(source_vcpu, 1);
 	if (vcpu_mode_is_32bit(source_vcpu))
 		cpu_id &= ~((u32) 0);
 
-	if (cpu_id >= atomic_read(&kvm->online_vcpus))
+	kvm_for_each_vcpu(i, tmp, kvm) {
+		mpidr = kvm_vcpu_get_mpidr(tmp);
+		if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
+			vcpu = tmp;
+			break;
+		}
+	}
+
+	if (!vcpu)
 		return KVM_PSCI_RET_INVAL;
 
 	target_pc = *vcpu_reg(source_vcpu, 2);
 
-	vcpu = kvm_get_vcpu(kvm, cpu_id);
-
 	wq = kvm_arch_vcpu_wq(vcpu);
 	if (!waitqueue_active(wq))
 		return KVM_PSCI_RET_INVAL;
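Since a guest MPIDR is no longer equal to the vcpu_id, PSCI CPU_ON has to resolve its target by affinity value rather than by index: a CPU_ON for 0x101 (Aff1 = 1, Aff0 = 1), for example, now lands on vcpu_id 5, and an affinity that matches no vCPU fails with KVM_PSCI_RET_INVAL. A minimal user-space sketch of the matching rule, with HWID_MASK standing in for MPIDR_HWID_BITMASK (the mask value is an assumption for the sketch):

/* Sketch only, not kernel code. */
#include <stddef.h>

#define HWID_MASK 0xffffffUL	/* assumed stand-in for MPIDR_HWID_BITMASK (Aff2:Aff1:Aff0) */

static int find_vcpu_by_mpidr(const unsigned long *mpidrs, size_t n,
			      unsigned long target)
{
	size_t i;

	for (i = 0; i < n; i++)
		if ((mpidrs[i] & HWID_MASK) == (target & HWID_MASK))
			return (int)i;
	return -1;	/* no such CPU: the guest would see KVM_PSCI_RET_INVAL */
}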
arch/arm/kvm/reset.c

@@ -33,8 +33,6 @@
  * Cortex-A15 and Cortex-A7 Reset Values
  */
 
-static const int cortexa_max_cpu_idx = 3;
-
 static struct kvm_regs cortexa_regs_reset = {
 	.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
 };
@@ -64,8 +62,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	switch (vcpu->arch.target) {
 	case KVM_ARM_TARGET_CORTEX_A7:
 	case KVM_ARM_TARGET_CORTEX_A15:
-		if (vcpu->vcpu_id > cortexa_max_cpu_idx)
-			return -EINVAL;
 		reset_regs = &cortexa_regs_reset;
 		vcpu->arch.midr = read_cpuid_id();
 		cpu_vtimer_irq = &cortexa_vtimer_irq;
arch/arm64/include/asm/kvm_emulate.h

@@ -177,4 +177,9 @@ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
 }
 
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+	return vcpu_sys_reg(vcpu, MPIDR_EL1);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */