kvm: powerpc: book3s: Allow the HV and PR selection per virtual machine
This moves the kvmppc_ops callbacks to be a per-VM entity, which lets us select HV or PR mode when creating a VM. We also allow both the kvm-hv and kvm-pr kernel modules to be loaded at the same time. To achieve this, ownership of /dev/kvm moves to the kvm.ko module, and depending on which KVM mode is selected at VM creation we take a reference count on the respective module.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
[agraf: fix coding style]
Signed-off-by: Alexander Graf <agraf@suse.de>
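For context, a minimal userspace sketch of how the new KVM_CREATE_VM machine-type argument is meant to be used. The helper name and error handling are illustrative only; the KVM_VM_PPC_HV/KVM_VM_PPC_PR values and the default-to-HV behaviour come from this patch:

```c
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Open /dev/kvm (now owned by kvm.ko) and create a VM, explicitly
 * requesting PR mode.  Passing KVM_VM_PPC_HV forces HV mode instead,
 * and 0 keeps the default (HV if both modules are loaded).
 */
static int create_pr_vm(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	if (kvm_fd < 0)
		return -1;

	/* fails with EINVAL if kvm-pr.ko is not loaded */
	return ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_PR);
}
```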
Parent: 5587027ce9
Commit: cbbc58d4fd
arch/powerpc/include/asm/kvm_host.h
@@ -270,6 +270,7 @@ struct kvm_arch {
 #ifdef CONFIG_KVM_XICS
 	struct kvmppc_xics *xics;
 #endif
+	struct kvmppc_ops *kvm_ops;
 };
 
 /*
arch/powerpc/include/asm/kvm_ppc.h
@@ -182,6 +182,7 @@ union kvmppc_one_reg {
 };
 
 struct kvmppc_ops {
+	struct module *owner;
 	bool is_hv_enabled;
 	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
@@ -217,7 +218,6 @@ struct kvmppc_ops {
 				       unsigned long npages);
 	int (*init_vm)(struct kvm *kvm);
 	void (*destroy_vm)(struct kvm *kvm);
-	int (*check_processor_compat)(void);
 	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
 	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			  unsigned int inst, int *advance);
@@ -229,7 +229,8 @@ struct kvmppc_ops {
 
 };
 
-extern struct kvmppc_ops *kvmppc_ops;
+extern struct kvmppc_ops *kvmppc_hv_ops;
+extern struct kvmppc_ops *kvmppc_pr_ops;
 
 /*
  * Cuts out inst bits with ordering according to spec.
@@ -326,7 +327,7 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
 
 static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
 {
-	kvmppc_ops->fast_vcpu_kick(vcpu);
+	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
 }
 
 #else
arch/powerpc/kvm/44x.c
@@ -213,16 +213,19 @@ static int __init kvmppc_44x_init(void)
 	if (r)
 		goto err_out;
 
-	r = kvm_init(&kvm_ops_44x, sizeof(struct kvmppc_vcpu_44x),
-		     0, THIS_MODULE);
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
 	if (r)
 		goto err_out;
+	kvm_ops_44x.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_44x;
+
 err_out:
 	return r;
 }
 
 static void __exit kvmppc_44x_exit(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_booke_exit();
 }
 
arch/powerpc/kvm/book3s.c
@@ -34,6 +34,7 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 
+#include "book3s.h"
 #include "trace.h"
 
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -71,7 +72,7 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 
 static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
 {
-	if (!kvmppc_ops->is_hv_enabled)
+	if (!vcpu->kvm->arch.kvm_ops->is_hv_enabled)
 		return to_book3s(vcpu)->hior;
 	return 0;
 }
@@ -79,7 +80,7 @@ static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
 static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
 			unsigned long pending_now, unsigned long old_pending)
 {
-	if (kvmppc_ops->is_hv_enabled)
+	if (vcpu->kvm->arch.kvm_ops->is_hv_enabled)
 		return;
 	if (pending_now)
 		vcpu->arch.shared->int_pending = 1;
@@ -93,7 +94,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 	ulong crit_r1;
 	bool crit;
 
-	if (kvmppc_ops->is_hv_enabled)
+	if (vcpu->kvm->arch.kvm_ops->is_hv_enabled)
 		return false;
 
 	crit_raw = vcpu->arch.shared->critical;
@@ -477,13 +478,13 @@ void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
-	return kvmppc_ops->get_sregs(vcpu, sregs);
+	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
-	return kvmppc_ops->set_sregs(vcpu, sregs);
+	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
 }
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
@@ -562,7 +563,7 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	if (size > sizeof(val))
 		return -EINVAL;
 
-	r = kvmppc_ops->get_one_reg(vcpu, reg->id, &val);
+	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
 	if (r == -EINVAL) {
 		r = 0;
 		switch (reg->id) {
@@ -641,7 +642,7 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
 		return -EFAULT;
 
-	r = kvmppc_ops->set_one_reg(vcpu, reg->id, &val);
+	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
 	if (r == -EINVAL) {
 		r = 0;
 		switch (reg->id) {
@@ -702,23 +703,23 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	kvmppc_ops->vcpu_load(vcpu, cpu);
+	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	kvmppc_ops->vcpu_put(vcpu);
+	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
 }
 
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
-	kvmppc_ops->set_msr(vcpu, msr);
+	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
 }
 EXPORT_SYMBOL_GPL(kvmppc_set_msr);
 
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-	return kvmppc_ops->vcpu_run(kvm_run, vcpu);
+	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
@@ -743,84 +744,84 @@ void kvmppc_decrementer_func(unsigned long data)
 
 struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 {
-	return kvmppc_ops->vcpu_create(kvm, id);
+	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
 }
 
 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
-	kvmppc_ops->vcpu_free(vcpu);
+	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
 }
 
 int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 {
-	return kvmppc_ops->check_requests(vcpu);
+	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
 }
 
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
-	return kvmppc_ops->get_dirty_log(kvm, log);
+	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
 }
 
 void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			      struct kvm_memory_slot *dont)
 {
-	kvmppc_ops->free_memslot(free, dont);
+	kvm->arch.kvm_ops->free_memslot(free, dont);
 }
 
 int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			       unsigned long npages)
 {
-	return kvmppc_ops->create_memslot(slot, npages);
+	return kvm->arch.kvm_ops->create_memslot(slot, npages);
 }
 
 void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
-	kvmppc_ops->flush_memslot(kvm, memslot);
+	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
 }
 
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
 				struct kvm_userspace_memory_region *mem)
 {
-	return kvmppc_ops->prepare_memory_region(kvm, memslot, mem);
+	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
 }
 
 void kvmppc_core_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				const struct kvm_memory_slot *old)
 {
-	kvmppc_ops->commit_memory_region(kvm, mem, old);
+	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
 }
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 {
-	return kvmppc_ops->unmap_hva(kvm, hva);
+	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
 }
 EXPORT_SYMBOL_GPL(kvm_unmap_hva);
 
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	return kvmppc_ops->unmap_hva_range(kvm, start, end);
+	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
 }
 
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
-	return kvmppc_ops->age_hva(kvm, hva);
+	return kvm->arch.kvm_ops->age_hva(kvm, hva);
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 {
-	return kvmppc_ops->test_age_hva(kvm, hva);
+	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
 }
 
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
-	kvmppc_ops->set_spte_hva(kvm, hva, pte);
+	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
 }
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvmppc_ops->mmu_destroy(vcpu);
+	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
 }
 
 int kvmppc_core_init_vm(struct kvm *kvm)
@@ -831,12 +832,12 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
 #endif
 
-	return kvmppc_ops->init_vm(kvm);
+	return kvm->arch.kvm_ops->init_vm(kvm);
 }
 
 void kvmppc_core_destroy_vm(struct kvm *kvm)
 {
-	kvmppc_ops->destroy_vm(kvm);
+	kvm->arch.kvm_ops->destroy_vm(kvm);
 
 #ifdef CONFIG_PPC64
 	kvmppc_rtas_tokens_free(kvm);
@@ -846,5 +847,35 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
 
 int kvmppc_core_check_processor_compat(void)
 {
-	return kvmppc_ops->check_processor_compat();
+	/*
+	 * We always return 0 for book3s. We check
+	 * for compatability while loading the HV
+	 * or PR module
+	 */
+	return 0;
 }
+
+static int kvmppc_book3s_init(void)
+{
+	int r;
+
+	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	if (r)
+		return r;
+#ifdef CONFIG_KVM_BOOK3S_32
+	r = kvmppc_book3s_init_pr();
+#endif
+	return r;
+
+}
+
+static void kvmppc_book3s_exit(void)
+{
+#ifdef CONFIG_KVM_BOOK3S_32
+	kvmppc_book3s_exit_pr();
+#endif
+	kvm_exit();
+}
+
+module_init(kvmppc_book3s_init);
+module_exit(kvmppc_book3s_exit);
arch/powerpc/kvm/book3s.h
@@ -28,5 +28,7 @@ extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
 					int sprn, ulong spr_val);
 extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
 					int sprn, ulong *spr_val);
+extern int kvmppc_book3s_init_pr(void);
+extern void kvmppc_book3s_exit_pr(void);
 
 #endif
arch/powerpc/kvm/book3s_hv.c
@@ -2159,7 +2159,7 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
 	return r;
 }
 
-static struct kvmppc_ops kvmppc_hv_ops = {
+static struct kvmppc_ops kvm_ops_hv = {
 	.is_hv_enabled = true,
 	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
 	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
@@ -2186,7 +2186,6 @@ static struct kvmppc_ops kvmppc_hv_ops = {
 	.create_memslot = kvmppc_core_create_memslot_hv,
 	.init_vm = kvmppc_core_init_vm_hv,
 	.destroy_vm = kvmppc_core_destroy_vm_hv,
-	.check_processor_compat = kvmppc_core_check_processor_compat_hv,
 	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
 	.emulate_op = kvmppc_core_emulate_op_hv,
 	.emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
@@ -2198,20 +2197,23 @@ static struct kvmppc_ops kvmppc_hv_ops = {
 static int kvmppc_book3s_init_hv(void)
 {
 	int r;
-
-	r = kvm_init(&kvmppc_hv_ops, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
-
-	if (r)
+	/*
+	 * FIXME!! Do we need to check on all cpus ?
+	 */
+	r = kvmppc_core_check_processor_compat_hv();
+	if (r < 0)
 		return r;
 
-	r = kvmppc_mmu_hv_init();
-
+	kvm_ops_hv.owner = THIS_MODULE;
+	kvmppc_hv_ops = &kvm_ops_hv;
+
+	r = kvmppc_mmu_hv_init();
 	return r;
 }
 
 static void kvmppc_book3s_exit_hv(void)
 {
-	kvm_exit();
+	kvmppc_hv_ops = NULL;
 }
 
 module_init(kvmppc_book3s_init_hv);
arch/powerpc/kvm/book3s_pr.c
@@ -1525,7 +1525,7 @@ static long kvm_arch_vm_ioctl_pr(struct file *filp,
 	return -ENOTTY;
 }
 
-static struct kvmppc_ops kvmppc_pr_ops = {
+static struct kvmppc_ops kvm_ops_pr = {
 	.is_hv_enabled = false,
 	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
 	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
@@ -1552,7 +1552,6 @@ static struct kvmppc_ops kvmppc_pr_ops = {
 	.create_memslot = kvmppc_core_create_memslot_pr,
 	.init_vm = kvmppc_core_init_vm_pr,
 	.destroy_vm = kvmppc_core_destroy_vm_pr,
-	.check_processor_compat = kvmppc_core_check_processor_compat_pr,
 	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
 	.emulate_op = kvmppc_core_emulate_op_pr,
 	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
@@ -1561,27 +1560,35 @@ static struct kvmppc_ops kvmppc_pr_ops = {
 	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
 };
 
-static int kvmppc_book3s_init_pr(void)
+
+int kvmppc_book3s_init_pr(void)
 {
 	int r;
 
-	r = kvm_init(&kvmppc_pr_ops, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
-
-	if (r)
+	r = kvmppc_core_check_processor_compat_pr();
+	if (r < 0)
 		return r;
 
-	r = kvmppc_mmu_hpte_sysinit();
+	kvm_ops_pr.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_pr;
 
+	r = kvmppc_mmu_hpte_sysinit();
 	return r;
 }
 
-static void kvmppc_book3s_exit_pr(void)
+void kvmppc_book3s_exit_pr(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_mmu_hpte_sysexit();
-	kvm_exit();
+
 }
 
+/*
+ * We only support separate modules for book3s 64
+ */
+#ifdef CONFIG_PPC_BOOK3S_64
+
 module_init(kvmppc_book3s_init_pr);
 module_exit(kvmppc_book3s_exit_pr);
 
 MODULE_LICENSE("GPL");
+#endif
arch/powerpc/kvm/book3s_xics.c
@@ -818,7 +818,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
 	}
 
 	/* Check for real mode returning too hard */
-	if (xics->real_mode && kvmppc_ops->is_hv_enabled)
+	if (xics->real_mode && vcpu->kvm->arch.kvm_ops->is_hv_enabled)
 		return kvmppc_xics_rm_complete(vcpu, req);
 
 	switch (req) {
arch/powerpc/kvm/booke.c
@@ -1472,7 +1472,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
 	get_sregs_base(vcpu, sregs);
 	get_sregs_arch206(vcpu, sregs);
-	return kvmppc_ops->get_sregs(vcpu, sregs);
+	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -1491,7 +1491,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	if (ret < 0)
 		return ret;
 
-	return kvmppc_ops->set_sregs(vcpu, sregs);
+	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
 }
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
@@ -1548,7 +1548,7 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		val = get_reg_val(reg->id, vcpu->arch.vrsave);
 		break;
 	default:
-		r = kvmppc_ops->get_one_reg(vcpu, reg->id, &val);
+		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
 		break;
 	}
 
@@ -1631,7 +1631,7 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		vcpu->arch.vrsave = set_reg_val(reg->id, val);
 		break;
 	default:
-		r = kvmppc_ops->set_one_reg(vcpu, reg->id, &val);
+		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
 		break;
 	}
 
@@ -1911,37 +1911,37 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvmppc_ops->mmu_destroy(vcpu);
+	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
 }
 
 int kvmppc_core_init_vm(struct kvm *kvm)
 {
-	return kvmppc_ops->init_vm(kvm);
+	return kvm->arch.kvm_ops->init_vm(kvm);
 }
 
 struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 {
-	return kvmppc_ops->vcpu_create(kvm, id);
+	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
 }
 
 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
-	kvmppc_ops->vcpu_free(vcpu);
+	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
 }
 
 void kvmppc_core_destroy_vm(struct kvm *kvm)
 {
-	kvmppc_ops->destroy_vm(kvm);
+	kvm->arch.kvm_ops->destroy_vm(kvm);
 }
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	kvmppc_ops->vcpu_load(vcpu, cpu);
+	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	kvmppc_ops->vcpu_put(vcpu);
+	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
 }
 
 int __init kvmppc_booke_init(void)
arch/powerpc/kvm/e500.c
@@ -555,13 +555,19 @@ static int __init kvmppc_e500_init(void)
 	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
 			   ivor[max_ivor] + handler_len);
 
-	r = kvm_init(&kvm_ops_e500, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+	if (r)
+		goto err_out;
+	kvm_ops_e500.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_e500;
+
 err_out:
 	return r;
 }
 
 static void __exit kvmppc_e500_exit(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_booke_exit();
 }
 
arch/powerpc/kvm/e500mc.c
@@ -373,15 +373,19 @@ static int __init kvmppc_e500mc_init(void)
 	kvmppc_init_lpid(64);
 	kvmppc_claim_lpid(0); /* host */
 
-	r = kvm_init(&kvm_ops_e500mc, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
 	if (r)
 		goto err_out;
+	kvm_ops_e500mc.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_e500mc;
+
 err_out:
 	return r;
 }
 
 static void __exit kvmppc_e500mc_exit(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_booke_exit();
 }
 
arch/powerpc/kvm/emulate.c
@@ -130,8 +130,8 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	case SPRN_PIR: break;
 
 	default:
-		emulated = kvmppc_ops->emulate_mtspr(vcpu, sprn,
-						     spr_val);
+		emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
+								  spr_val);
 		if (emulated == EMULATE_FAIL)
 			printk(KERN_INFO "mtspr: unknown spr "
 				"0x%x\n", sprn);
@@ -191,8 +191,8 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 		spr_val = kvmppc_get_dec(vcpu, get_tb());
 		break;
 	default:
-		emulated = kvmppc_ops->emulate_mfspr(vcpu, sprn,
-						     &spr_val);
+		emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
+								  &spr_val);
 		if (unlikely(emulated == EMULATE_FAIL)) {
 			printk(KERN_INFO "mfspr: unknown spr "
 				"0x%x\n", sprn);
@@ -464,7 +464,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	}
 
 	if (emulated == EMULATE_FAIL) {
-		emulated = kvmppc_ops->emulate_op(run, vcpu, inst, &advance);
+		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
+							       &advance);
 		if (emulated == EMULATE_AGAIN) {
 			advance = 0;
 		} else if (emulated == EMULATE_FAIL) {
arch/powerpc/kvm/powerpc.c
@@ -26,6 +26,7 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/file.h>
+#include <linux/module.h>
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
@@ -39,7 +40,11 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-struct kvmppc_ops *kvmppc_ops;
+struct kvmppc_ops *kvmppc_hv_ops;
+EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
+struct kvmppc_ops *kvmppc_pr_ops;
+EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
+
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
@@ -195,7 +200,7 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
 		goto out;
 
 	/* HV KVM can only do PAPR mode for now */
-	if (!vcpu->arch.papr_enabled && kvmppc_ops->is_hv_enabled)
+	if (!vcpu->arch.papr_enabled && vcpu->kvm->arch.kvm_ops->is_hv_enabled)
 		goto out;
 
 #ifdef CONFIG_KVM_BOOKE_HV
@@ -271,10 +276,35 @@ void kvm_arch_check_processor_compat(void *rtn)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	if (type)
-		return -EINVAL;
+	struct kvmppc_ops *kvm_ops = NULL;
+	/*
+	 * if we have both HV and PR enabled, default is HV
+	 */
+	if (type == 0) {
+		if (kvmppc_hv_ops)
+			kvm_ops = kvmppc_hv_ops;
+		else
+			kvm_ops = kvmppc_pr_ops;
+		if (!kvm_ops)
+			goto err_out;
+	} else if (type == KVM_VM_PPC_HV) {
+		if (!kvmppc_hv_ops)
+			goto err_out;
+		kvm_ops = kvmppc_hv_ops;
+	} else if (type == KVM_VM_PPC_PR) {
+		if (!kvmppc_pr_ops)
+			goto err_out;
+		kvm_ops = kvmppc_pr_ops;
+	} else
+		goto err_out;
 
+	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
+		return -ENOENT;
+
+	kvm->arch.kvm_ops = kvm_ops;
 	return kvmppc_core_init_vm(kvm);
+err_out:
+	return -EINVAL;
 }
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
@@ -294,6 +324,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kvmppc_core_destroy_vm(kvm);
 
 	mutex_unlock(&kvm->lock);
+
+	/* drop the module reference */
+	module_put(kvm->arch.kvm_ops->owner);
 }
 
 void kvm_arch_sync_events(struct kvm *kvm)
@@ -303,6 +336,10 @@ void kvm_arch_sync_events(struct kvm *kvm)
 int kvm_dev_ioctl_check_extension(long ext)
 {
 	int r;
+	/* FIXME!!
+	 * Should some of this be vm ioctl ? is it possible now ?
+	 */
+	int hv_enabled = kvmppc_hv_ops ? 1 : 0;
 
 	switch (ext) {
 #ifdef CONFIG_BOOKE
@@ -329,7 +366,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_SW_TLB:
 #endif
 		/* We support this only for PR */
-		r = !kvmppc_ops->is_hv_enabled;
+		r = !hv_enabled;
 		break;
 #ifdef CONFIG_KVM_MMIO
 	case KVM_CAP_COALESCED_MMIO:
@@ -354,13 +391,13 @@ int kvm_dev_ioctl_check_extension(long ext)
 #endif /* CONFIG_PPC_BOOK3S_64 */
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	case KVM_CAP_PPC_SMT:
-		if (kvmppc_ops->is_hv_enabled)
+		if (hv_enabled)
 			r = threads_per_core;
 		else
 			r = 0;
 		break;
 	case KVM_CAP_PPC_RMA:
-		r = kvmppc_ops->is_hv_enabled;
+		r = hv_enabled;
 		/* PPC970 requires an RMA */
 		if (r && cpu_has_feature(CPU_FTR_ARCH_201))
 			r = 2;
@@ -368,7 +405,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 #endif
 	case KVM_CAP_SYNC_MMU:
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-		if (kvmppc_ops->is_hv_enabled)
+		if (hv_enabled)
 			r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
 		else
 			r = 0;
@@ -380,7 +417,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		break;
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	case KVM_CAP_PPC_HTAB_FD:
-		r = kvmppc_ops->is_hv_enabled;
+		r = hv_enabled;
 		break;
 #endif
 	case KVM_CAP_NR_VCPUS:
@@ -390,7 +427,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		 * will have secondary threads "offline"), and for other KVM
 		 * implementations just count online CPUs.
 		 */
-		if (kvmppc_ops->is_hv_enabled)
+		if (hv_enabled)
 			r = num_present_cpus();
 		else
 			r = num_online_cpus();
@@ -1039,9 +1076,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 	case KVM_PPC_GET_SMMU_INFO: {
 		struct kvm_ppc_smmu_info info;
+		struct kvm *kvm = filp->private_data;
 
 		memset(&info, 0, sizeof(info));
-		r = kvmppc_ops->get_smmu_info(kvm, &info);
+		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
 		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
 			r = -EFAULT;
 		break;
@@ -1052,9 +1090,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
 		break;
 	}
-	default:
-		r = kvmppc_ops->arch_vm_ioctl(filp, ioctl, arg);
+	default: {
+		struct kvm *kvm = filp->private_data;
+		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
+	}
 #else /* CONFIG_PPC_BOOK3S_64 */
 	default:
 		r = -ENOTTY;
@@ -1104,15 +1143,10 @@ EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
 
 int kvm_arch_init(void *opaque)
 {
-	if (kvmppc_ops) {
-		printk(KERN_ERR "kvm: already loaded the other module\n");
-		return -EEXIST;
-	}
-	kvmppc_ops = (struct kvmppc_ops *)opaque;
 	return 0;
 }
 
 void kvm_arch_exit(void)
 {
-	kvmppc_ops = NULL;
 }
 
include/uapi/linux/kvm.h
@@ -518,6 +518,10 @@ struct kvm_ppc_smmu_info {
 /* machine type bits, to be used as argument to KVM_CREATE_VM */
 #define KVM_VM_S390_UCONTROL	1
 
+/* on ppc, 0 indicate default, 1 should force HV and 2 PR */
+#define KVM_VM_PPC_HV 1
+#define KVM_VM_PPC_PR 2
+
 #define KVM_S390_SIE_PAGE_OFFSET 1
 
 /*