Merge tag 'kvm-arm-for-3.14' of git://git.linaro.org/people/christoffer.dall/linux-kvm-arm into kvm-queue
Commit ab53f22e2e

@@ -2327,7 +2327,7 @@ current state. "addr" is ignored.
 Capability: basic
 Architectures: arm, arm64
 Type: vcpu ioctl
-Parameters: struct struct kvm_vcpu_init (in)
+Parameters: struct kvm_vcpu_init (in)
 Returns: 0 on success; -1 on error
 Errors:
   EINVAL:    the target is unknown, or the combination of features is invalid.

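For reference, a minimal userspace sketch of the ioctl documented above. The Cortex-A15 target and the POWER_OFF feature bit are illustrative choices, not part of this change:

  #include <linux/kvm.h>
  #include <string.h>
  #include <sys/ioctl.h>

  /*
   * Sketch only: initialise a vcpu fd (obtained from KVM_CREATE_VCPU) for an
   * illustrative Cortex-A15 target, requesting the "start powered off"
   * behaviour that the rest of this series reworks.
   */
  static int vcpu_init_sketch(int vcpu_fd)
  {
      struct kvm_vcpu_init init;

      memset(&init, 0, sizeof(init));
      init.target = KVM_ARM_TARGET_CORTEX_A15;
      init.features[0] = 1 << KVM_ARM_VCPU_POWER_OFF;

      /* Returns 0 on success, -1 with errno (e.g. EINVAL) on error. */
      return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
  }
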
@@ -140,6 +140,7 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
 }

 #define kvm_flush_dcache_to_poc(a,l)    __cpuc_flush_dcache_area((a), (l))
+#define kvm_virt_to_phys(x)             virt_to_idmap((unsigned long)(x))

 #endif /* !__ASSEMBLY__ */

@@ -489,15 +489,6 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
             return ret;
     }

-    /*
-     * Handle the "start in power-off" case by calling into the
-     * PSCI code.
-     */
-    if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
-        *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
-        kvm_psci_call(vcpu);
-    }
-
     return 0;
 }

@@ -711,6 +702,24 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
     return -EINVAL;
 }

+static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
+                                         struct kvm_vcpu_init *init)
+{
+    int ret;
+
+    ret = kvm_vcpu_set_target(vcpu, init);
+    if (ret)
+        return ret;
+
+    /*
+     * Handle the "start in power-off" case by marking the VCPU as paused.
+     */
+    if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+        vcpu->arch.pause = true;
+
+    return 0;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
 {

@@ -724,8 +733,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
         if (copy_from_user(&init, argp, sizeof(init)))
             return -EFAULT;

-        return kvm_vcpu_set_target(vcpu, &init);
-
+        return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
     }
     case KVM_SET_ONE_REG:
     case KVM_GET_ONE_REG: {

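Taken together, the two hunks above move the "start powered off" handling out of the first-run path: KVM_ARM_VCPU_INIT now simply records the request as vcpu->arch.pause. A hedged sketch of how such a paused vcpu is then held until PSCI CPU_ON wakes it, modelled on the run-loop behaviour of this era rather than the literal arm.c code:

  #include <linux/kvm_host.h>
  #include <linux/wait.h>

  /*
   * Sketch only: a vcpu marked pause = true by kvm_arch_vcpu_ioctl_vcpu_init()
   * sleeps on its wait queue when KVM_RUN is entered, until another vcpu's
   * PSCI CPU_ON clears the flag and wakes the queue (see the psci.c hunks
   * further down).
   */
  static void vcpu_pause_sketch(struct kvm_vcpu *vcpu)
  {
      wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);

      wait_event_interruptible(*wq, !vcpu->arch.pause);
  }
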
@@ -26,8 +26,6 @@

 #include "trace.h"

-#include "trace.h"
-
 typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);

 static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)

@@ -667,14 +667,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
     } else {
         /*
-         * Pages belonging to VMAs not aligned to the PMD mapping
-         * granularity cannot be mapped using block descriptors even
-         * if the pages belong to a THP for the process, because the
-         * stage-2 block descriptor will cover more than a single THP
-         * and we loose atomicity for unmapping, updates, and splits
-         * of the THP or other pages in the stage-2 block range.
+         * Pages belonging to memslots that don't have the same
+         * alignment for userspace and IPA cannot be mapped using
+         * block descriptors even if the pages belong to a THP for
+         * the process, because the stage-2 block descriptor will
+         * cover more than a single THP and we loose atomicity for
+         * unmapping, updates, and splits of the THP or other pages
+         * in the stage-2 block range.
         */
-        if (vma->vm_start & ~PMD_MASK)
+        if ((memslot->userspace_addr & ~PMD_MASK) !=
+            ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
             force_pte = true;
     }
     up_read(&current->mm->mmap_sem);

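To make the new condition concrete: stage-2 block (PMD) mappings are only attempted when the memslot's userspace address and its IPA sit at the same offset within a PMD-sized region (2 MiB with 4 KiB pages). A self-contained sketch of the same arithmetic, using made-up example addresses:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define EX_PAGE_SHIFT 12
  #define EX_PMD_SIZE   (1UL << 21)          /* 2 MiB, assuming 4 KiB pages */
  #define EX_PMD_MASK   (~(EX_PMD_SIZE - 1))

  /* Mirrors the user_mem_abort() test: block mappings are only safe when the
   * userspace VA and the guest IPA are congruent modulo the PMD size. */
  static bool can_use_block_mappings(uint64_t userspace_addr, uint64_t base_gfn)
  {
      return (userspace_addr & ~EX_PMD_MASK) ==
             ((base_gfn << EX_PAGE_SHIFT) & ~EX_PMD_MASK);
  }

  int main(void)
  {
      /* Hypothetical memslot: VA 0x7f0000100000 backing IPA 0x80100000.
       * Both are 1 MiB into their 2 MiB regions, so THP block mappings stay
       * possible: prints 1. */
      printf("%d\n", can_use_block_mappings(0x7f0000100000ULL,
                                            0x80100000ULL >> EX_PAGE_SHIFT));

      /* Hypothetical memslot: VA 0x7f0000100000 backing IPA 0x80000000.
       * Offsets differ (1 MiB vs 0), so the fault handler falls back to page
       * mappings (force_pte = true): prints 0. */
      printf("%d\n", can_use_block_mappings(0x7f0000100000ULL,
                                            0x80000000ULL >> EX_PAGE_SHIFT));
      return 0;
  }
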
@@ -916,9 +918,9 @@ int kvm_mmu_init(void)
 {
     int err;

-    hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
-    hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
-    hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);
+    hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
+    hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
+    hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);

     if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
         /*

@@ -945,7 +947,7 @@ int kvm_mmu_init(void)
      */
     kvm_flush_dcache_to_poc(init_bounce_page, len);

-    phys_base = virt_to_phys(init_bounce_page);
+    phys_base = kvm_virt_to_phys(init_bounce_page);
     hyp_idmap_vector += phys_base - hyp_idmap_start;
     hyp_idmap_start = phys_base;
     hyp_idmap_end = phys_base + len;

@@ -54,15 +54,15 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
         }
     }

-    if (!vcpu)
+    /*
+     * Make sure the caller requested a valid CPU and that the CPU is
+     * turned off.
+     */
+    if (!vcpu || !vcpu->arch.pause)
         return KVM_PSCI_RET_INVAL;

     target_pc = *vcpu_reg(source_vcpu, 2);

-    wq = kvm_arch_vcpu_wq(vcpu);
-    if (!waitqueue_active(wq))
-        return KVM_PSCI_RET_INVAL;
-
     kvm_reset_vcpu(vcpu);

     /* Gracefully handle Thumb2 entry point */

@@ -79,6 +79,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
     vcpu->arch.pause = false;
     smp_mb();        /* Make sure the above is visible */

+    wq = kvm_arch_vcpu_wq(vcpu);
     wake_up_interruptible(wq);

     return KVM_PSCI_RET_SUCCESS;

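For context, kvm_psci_vcpu_on() runs when a guest vcpu issues PSCI 0.1 CPU_ON via HVC, with the target CPU in r1 and the entry point in r2 (hence the *vcpu_reg(source_vcpu, 2) read above). A hedged guest-side sketch in ARM inline assembly; in a real guest the function id is taken from the device tree rather than passed as a parameter:

  /*
   * Sketch only, assuming an ARMv7 guest built with the virtualization
   * extensions available.  Register layout matches what the handler reads:
   * r0 = function id (e.g. KVM_PSCI_FN_CPU_ON from the ARM KVM headers),
   * r1 = target CPU, r2 = entry point, r3 = context id.
   */
  static unsigned long psci_0_1_cpu_on(unsigned long fn_cpu_on,
                                       unsigned long target_cpu,
                                       unsigned long entry_point)
  {
      register unsigned long r0 asm("r0") = fn_cpu_on;
      register unsigned long r1 asm("r1") = target_cpu;
      register unsigned long r2 asm("r2") = entry_point;
      register unsigned long r3 asm("r3") = 0;

      asm volatile(".arch_extension virt\n\thvc #0"
                   : "+r" (r0)
                   : "r" (r1), "r" (r2), "r" (r3)
                   : "memory");

      /* KVM_PSCI_RET_SUCCESS, or KVM_PSCI_RET_INVAL if the target is
       * unknown or not currently powered off. */
      return r0;
  }
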
@@ -136,6 +136,7 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
 }

 #define kvm_flush_dcache_to_poc(a,l)    __flush_dcache_area((a), (l))
+#define kvm_virt_to_phys(x)             __virt_to_phys((unsigned long)(x))

 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */